from flask import Flask
from flask import request
from flask import session
from flask import jsonify
from flask import make_response
import mariadb
import datetime
import json
import scad_utils
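
# when testing, a fixed datetime is used so the attendance-marking windows are reproducible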
testing: bool = True
if testing:
fake_datetime = datetime.datetime(2020, 8, 7, 15, 10)
app = Flask(__name__)
app.config["SECRET_KEY"] = "clave ultra secreta"
app.permanent_session_lifetime = datetime.timedelta(minutes=20)
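# how long after a course's HoraInicio a teacher is still allowed to mark attendance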
teacher_time_tolerance = datetime.timedelta(minutes=20)
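# pool of MariaDB connections shared by all request handlers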
db = mariadb.ConnectionPool(
user="brocolio",
password="brocolio",
host="localhost",
pool_name="pul",
pool_size=20,
database="scad",
)
# tmp_cursor: mysql.cursor.MySQLCursor = db.cursor()
# tmp_cursor.execute("SET lc_time_names = 'es_PE';")
# tmp_cursor.close()
spanish_days: dict = {
"Monday": "lunes",
"Tuesday": "martes",
"Wednesday": "miércoles",
"Thursday": "jueves",
"Friday": "viernes",
"Saturday": "sábado",
"Sunday": "domingo",
}
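
# monkey-patch the stdlib JSON encoder so datetime and date objects are serialized as ISO strings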
json.JSONEncoder.default = lambda self, obj: (
obj.isoformat()
if isinstance(obj, datetime.datetime) or isinstance(obj, datetime.date)
else str(obj)
)
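
# POST /login: check the credentials against the Docente table first and then the
# Administrador table, storing the account type and identity in the session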
@app.route("/login", methods=["POST"])
def login() -> dict:
db_connection = db.get_connection()
db_cursor = db_connection.cursor(named_tuple=True)
data: dict = request.get_json()
    # check the database whether the given user and password are valid
    # first look in the Docente (teacher) table
query: str = (
"select DocenteDNI, Nombre, Apellido, Usuario "
"from Docente "
"where Usuario=? and Contrasena=?"
)
db_cursor.execute(query, (data["Usuario"], data["Contrasena"]))
rows = db_cursor.fetchall()
if len(rows) == 1:
session.permanent = True
session["account_type"] = "Docente"
session["DocenteDNI"] = rows[0].DocenteDNI
session["Nombre"] = rows[0].Nombre
session["Apellido"] = rows[0].Apellido
session["Usuario"] = rows[0].Usuario
db_cursor.close()
db_connection.close()
return make_response({"account_type": session["account_type"]}, 200)
else:
        # then look in the Administrador table
query: str = (
"select Usuario,Contrasena "
"from Administrador "
"where Usuario=? and Contrasena=?"
)
db_cursor.execute(query, (data["Usuario"], data["Contrasena"]))
rows = db_cursor.fetchall()
if len(rows) == 1:
session.permanent = True
session["account_type"] = "Administrador"
session["Usuario"] = rows[0].Usuario
db_cursor.close()
db_connection.close()
return make_response({"account_type": session["account_type"]}, 200)
        # nothing was found
else:
db_cursor.close()
db_connection.close()
return make_response("pos a lo mejor se equivoco?", 401)
@app.route("/teacher_fullname", methods=["GET"])
def teacherFullname() -> dict:
if "account_type" not in session:
return make_response("pa que quieres saber eso jaja salu2", 401)
elif session["account_type"] == "Docente":
return {"Nombre": session["Nombre"], "Apellido": session["Apellido"]}
elif session["account_type"] == "Administrador":
return make_response("wey no!!!", 400)
@app.route("/time", methods=["GET"])
def time() -> dict:
if testing:
current_datetime = fake_datetime
else:
current_datetime = datetime.datetime.now()
return {
"date": current_datetime.strftime("%d/%m/%Y"),
"time": current_datetime.strftime("%H,%M,%S"),
}
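
# GET /teacher_course_list: list the courses assigned to the logged-in teacher for
# today, tagging each one as "marked", "mark_now", "not_marked" or "waiting"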
@app.route("/teacher_course_list", methods=["GET"])
def teacherCourseList() -> list:
    # verify the session
if "account_type" not in session:
        # not logged in
return make_response("nope", 401)
elif session["account_type"] == "Docente":
        # fetch the list of courses and whether each one has been marked
        # a marked course is identified because its Hora value in the
        # Marcacion table is not NULL
if testing:
current_datetime = fake_datetime
else:
current_datetime = datetime.datetime.now()
db_connection = db.get_connection()
db_cursor = db_connection.cursor()
db_cursor.execute("SET lc_time_names = 'es_PE'")
query: str = (
"select AsignacionCursoID, a.CursoNombre, a.HoraInicio, a.HoraFin, s.Pabellon, s.Numero "
"from AsignacionCurso a "
"inner join Salon s using(SalonID) "
"where Dia=dayname(?) and DocenteDNI=? "
)
db_cursor.execute(
query, (current_datetime.strftime("%Y/%m/%d"), session["DocenteDNI"])
)
today_assigned_courses: list = db_cursor.fetchall()
        # format the course rows into dicts
today_assigned_courses = scad_utils.rowToDict(
(
"AsignacionCursoID",
"CursoNombre",
"HoraInicio",
"HoraFin",
"Pabellon",
"Numero",
),
today_assigned_courses,
)
if len(today_assigned_courses) > 0:
existence_check_query: str = (
"select * from Marcacion " "where Fecha=? and AsignacionCursoID=?"
)
for course in today_assigned_courses:
db_cursor.execute(
existence_check_query,
(
current_datetime.strftime("%Y/%m/%d"),
course["AsignacionCursoID"],
),
)
if len(db_cursor.fetchall()) > 0:
course["state"] = "marked"
else:
if current_datetime >= scad_utils.timeToDatetime(
course["HoraInicio"], current_datetime
):
if (
current_datetime
- scad_utils.timeToDatetime(
course["HoraInicio"], current_datetime
)
<= teacher_time_tolerance
):
course["state"] = "mark_now"
else:
course["state"] = "not_marked"
else:
course["state"] = "waiting"
db_cursor.close()
db_connection.close()
return jsonify(today_assigned_courses)
elif session["account_type"] == "Administrador":
        # an administrator should not use this endpoint
return make_response("ya nos jakiaron", 400)
@app.route("/teacher_mark", methods=["POST"])
def teacherMark() -> dict:
    # check whether the course attendance can still be marked
if "account_type" not in session:
        # not logged in
return make_response("stap", 401)
elif session["account_type"] == "Docente":
if testing:
current_datetime = fake_datetime
else:
current_datetime = datetime.datetime.now()
        # check whether there is a course available to mark right now
        course_to_mark: list
db_connection = db.get_connection()
db_cursor = db_connection.cursor(named_tuple=True)
db_cursor.execute("SET lc_time_names = 'es_PE'")
query: str = (
"select AsignacionCursoID,SalonID "
"from AsignacionCurso "
"where DocenteDNI=? "
"and Dia=dayname(?) "
"and HoraInicio <=? "
"and timediff(?,HoraInicio)<=?;"
)
db_cursor.execute(
query,
(
session["DocenteDNI"],
current_datetime.strftime("%Y/%m/%d"),
current_datetime.strftime("%H:%M:%S"),
current_datetime.strftime("%H:%M:%S"),
str(teacher_time_tolerance),
),
)
course_to_mark = db_cursor.fetchall()
if len(course_to_mark) == 1:
insertion_query: str = ("insert into Marcacion() " "values(?,?,?,?);")
db_cursor.execute(
insertion_query,
(
int(course_to_mark[0].AsignacionCursoID),
current_datetime.strftime("%Y/%m/%d"),
current_datetime.strftime("%H:%M:%S"),
int(course_to_mark[0].SalonID),
),
)
db_cursor.close()
db_connection.close()
return make_response("se marco la asistencia", 200)
else:
db_cursor.close()
db_connection.close()
return make_response("ya es tarde", 406)
elif session["account_type"] == "Administrador":
return make_response(
"papu, si ya nos jakiaste por lo menos usa los servicios correctos no?", 400
)
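
# GET /admin_get_report: build an attendance report for the requested time range;
# only "today" and "yesterday" are implemented, the remaining ranges are stubs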
@app.route("/admin_get_report", methods=["GET"])
def adminGetReport() -> list:
if "account_type" not in session:
        # not logged in
return make_response("nope", 401)
elif session["account_type"] == "Administrador":
time_range = request.get_json()["time_range"]
if testing:
current_datetime = fake_datetime
else:
current_datetime = datetime.datetime.now()
db_connection = db.get_connection()
db_cursor = db_connection.cursor(named_tuple=True)
db_cursor.execute("SET lc_time_names = 'es_PE'")
report: list
if time_range == "today":
query: str = (
"select a.AsignacionCursoID,d.DocenteDNI,d.Nombre,d.Apellido, "
"a.CursoNombre, a.HoraInicio, a.HoraFin, s.Pabellon, s.Numero "
"from AsignacionCurso a "
"inner join Salon s using(SalonID) "
"inner join Docente d using(DocenteDNI) "
"where Dia=dayname(?) and a.HoraInicio<? "
)
db_cursor.execute(
query,
(
current_datetime.strftime("%Y-%m-%d"),
current_datetime.strftime("%H:%M:%S"),
),
)
report = db_cursor.fetchall()
            # format the report rows into dicts
report = scad_utils.rowToDict(
(
"AsignacionCursoID",
"DocenteDNI",
"Nombre",
"Apellido",
"CursoNombre",
"HoraInicio",
"HoraFin",
"Pabellon",
"Numero",
),
report,
)
if len(report) > 0:
existence_check_query: str = (
"select * from Marcacion " "where Fecha=? and AsignacionCursoID=?"
)
for assignment in report:
db_cursor.execute(
existence_check_query,
(
current_datetime.strftime("%Y-%m-%d"),
assignment["AsignacionCursoID"],
),
)
if len(db_cursor.fetchall()) > 0:
assignment["state"] = "marked"
else:
assignment["state"] = "not_marked"
db_cursor.close()
db_connection.close()
return make_response(jsonify(report), 200)
elif time_range == "yesterday":
query: str = (
"select a.AsignacionCursoID,d.DocenteDNI,d.Nombre,d.Apellido, "
"a.CursoNombre, a.HoraInicio, a.HoraFin, s.Pabellon, s.Numero "
"from AsignacionCurso a "
"inner join Salon s using(SalonID) "
"inner join Docente d using(DocenteDNI) "
"where Dia=dayname(?)"
)
current_datetime -= datetime.timedelta(days=1)
db_cursor.execute(
query, (current_datetime.strftime("%Y-%m-%d"),),
)
report = db_cursor.fetchall()
            # format the report rows into dicts
report = scad_utils.rowToDict(
(
"AsignacionCursoID",
"DocenteDNI",
"Nombre",
"Apellido",
"CursoNombre",
"HoraInicio",
"HoraFin",
"Pabellon",
"Numero",
),
report,
)
if len(report) > 0:
existence_check_query: str = (
"select * from Marcacion " "where Fecha=? and AsignacionCursoID=?"
)
for assignment in report:
db_cursor.execute(
existence_check_query,
(
current_datetime.strftime("%Y-%m-%d"),
assignment["AsignacionCursoID"],
),
)
if len(db_cursor.fetchall()) > 0:
assignment["state"] = "marked"
else:
assignment["state"] = "not_marked"
db_cursor.close()
db_connection.close()
return make_response(jsonify(report), 200)
elif time_range == "this_week":
pass
elif time_range == "this_month":
pass
elif time_range == "all":
pass
else:
return make_response("peticion invalida", 406)
elif session["account_type"] == "Docente":
        # a teacher should not use this endpoint
return make_response("ya nos jakiaron", 400)
@app.route("/admin_add_teacher", methods=["POST"])
def adminAddTeacher() -> dict:
if "account_type" not in session:
return make_response("", 401)
elif session["account_type"] == "Administrador":
data = request.get_json()
db_connection = db.get_connection()
db_cursor = db_connection.cursor()
query: str = ("insert into Docente() values(?,?,?,?,?)")
db_cursor.execute(
query,
(
data["DocenteDNI"],
data["Nombre"],
data["Apellido"],
data["Usuario"],
data["Contrasena"],
),
)
db_cursor.close()
db_connection.close()
return make_response("se agrego la entrada", 200)
elif session["account_type"] == "Docente":
return make_response("", 401)
@app.route("/admin_get_teacher_table", methods=["GET"])
def adminGetTeacherTable() -> dict:
if "account_type" not in session:
return make_response("", 401)
elif session["account_type"] == "Administrador":
db_connection = db.get_connection()
db_cursor = db_connection.cursor()
query: str = ("select * from Docente")
db_cursor.execute(query)
teacher_table = scad_utils.rowToDict(
("DocenteDNI", "Nombre", "Apellido", "Usuario", "Contrasena"),
db_cursor.fetchall(),
)
db_cursor.close()
db_connection.close()
return make_response(jsonify(teacher_table), 200)
elif session["account_type"] == "Docente":
return make_response("", 401)
@app.route("/admin_get_course_table", methods=["GET"])
def adminGetCourseTable() -> dict:
if "account_type" not in session:
return make_response("", 401)
elif session["account_type"] == "Administrador":
db_connection = db.get_connection()
db_cursor = db_connection.cursor()
query: str = ("select * from Curso")
db_cursor.execute(query)
course_table = scad_utils.rowToDict(
("CursoNombre", "FechaInicio", "FechaFin"), db_cursor.fetchall(),
)
for course in course_table:
course["FechaInicio"] = course["FechaInicio"].isoformat()
course["FechaFin"] = course["FechaFin"].isoformat()
db_cursor.close()
db_connection.close()
return make_response(jsonify(course_table), 200)
elif session["account_type"] == "Docente":
return make_response("", 401)
@app.route("/admin_get_classroom_table", methods=["GET"])
def adminGetClassroomTable() -> dict:
if "account_type" not in session:
return make_response("", 401)
elif session["account_type"] == "Administrador":
db_connection = db.get_connection()
db_cursor = db_connection.cursor()
query: str = ("select Pabellon,Numero from Salon")
db_cursor.execute(query)
classroom_table = scad_utils.rowToDict(
("Pabellon", "Numero"), db_cursor.fetchall(),
)
db_cursor.close()
db_connection.close()
return make_response(jsonify(classroom_table), 200)
elif session["account_type"] == "Docente":
return make_response("", 401)
@app.route("/admin_get_course_assignment_table", methods=["GET"])
def adminGetCourseAssignmentTable() -> dict:
if "account_type" not in session:
return make_response("", 401)
elif session["account_type"] == "Administrador":
db_connection = db.get_connection()
db_cursor = db_connection.cursor()
query: str = (
"select d.DocenteDNI, d.Nombre, d.Apellido,"
"a.CursoNombre, s.Pabellon,s.Numero, a.HoraInicio, a.HoraFin,a.Dia "
"from AsignacionCurso a "
"inner join Salon s using(SalonID) "
"inner join Docente d using(DocenteDNI)"
)
db_cursor.execute(query)
course_assignment_table = scad_utils.rowToDict(
(
"DocenteDNI",
"Nombre",
"Apellido",
"CursoNombre",
"Pabellon",
"Numero",
"HoraInicio",
"HoraFin",
"Dia",
),
db_cursor.fetchall(),
)
db_cursor.close()
db_connection.close()
return make_response(jsonify(course_assignment_table), 200)
elif session["account_type"] == "Docente":
return make_response("", 401)
@app.route("/logout", methods=["DELETE"])
def logout() -> dict:
if "account_type" not in session:
return make_response("primero inicia session broz", 301)
else:
if session["account_type"] == "Docente":
session.pop("Usuario")
session.pop("Nombre")
session.pop("Apellido")
return make_response("hasta luego prosor", 200)
elif session["account_type"] == "Administrador":
session.pop("Usuario")
return make_response("espero haberle sido util, hasta luego", 200)
return make_response("espero haberle sido util, hasta luego", 200)
return make_response("espero haberle sido util, hasta luego", 200)
return make_response("espero haberle sido util, hasta luego", 200)
return make_response("espero haberle sido util, hasta luego", 200)
return make_response("espero haberle sido util, hasta luego", 200)
return make_response("espero haberle sido util, hasta luego", 200)
return make_response("espero haberle sido util, hasta luego", 200)
and Contrasena=?\"\n )\n db_cursor.execute(query, (data[\"Usuario\"], data[\"Contrasena\"]))\n rows = db_cursor.fetchall()\n\n if len(rows) == 1:\n session.permanent = True\n session[\"account_type\"] = \"Administrador\"\n session[\"Usuario\"] = rows[0].Usuario\n db_cursor.close()\n db_connection.close()\n return make_response({\"account_type\": session[\"account_type\"]}, 200)\n # no se encontro nada\n else:\n db_cursor.close()\n db_connection.close()\n return make_response(\"pos a lo mejor se equivoco?\", 401)\n\n\[email protected](\"/teacher_fullname\", methods=[\"GET\"])\ndef teacherFullname() -> dict:\n if \"account_type\" not in session:\n return make_response(\"pa que quieres saber eso jaja salu2\", 401)\n elif session[\"account_type\"] == \"Docente\":\n return {\"Nombre\": session[\"Nombre\"], \"Apellido\": session[\"Apellido\"]}\n elif session[\"account_type\"] == \"Administrador\":\n return make_response(\"wey no!!!\", 400)\n\n\[email protected](\"/time\", methods=[\"GET\"])\ndef time() -> dict:\n if testing:\n current_datetime = fake_datetime\n else:\n current_datetime = datetime.datetime.now()\n return {\n \"date\": current_datetime.strftime(\"%d/%m/%Y\"),\n \"time\": current_datetime.strftime(\"%H,%M,%S\"),\n }\n\n\[email protected](\"/teacher_course_list\", methods=[\"GET\"])\ndef teacherCourseList() -> list:\n # verificar la sesion\n if \"account_type\" not in session:\n # no inicio sesion\n return make_response(\"nope\", 401)\n elif session[\"account_type\"] == \"Docente\":\n # consultar la lista de cursos y si se han marcado o no\n # un curso marcado se diferencia porque el valor de Hora de la tabla Marcacion\n # es diferente de NULL\n if testing:\n current_datetime = fake_datetime\n else:\n current_datetime = datetime.datetime.now()\n\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor()\n db_cursor.execute(\"SET lc_time_names = 'es_PE'\")\n query: str = (\n \"select AsignacionCursoID, a.CursoNombre, a.HoraInicio, a.HoraFin, s.Pabellon, s.Numero \"\n \"from AsignacionCurso a \"\n \"inner join Salon s using(SalonID) \"\n \"where Dia=dayname(?) and DocenteDNI=? \"\n )\n db_cursor.execute(\n query, (current_datetime.strftime(\"%Y/%m/%d\"), session[\"DocenteDNI\"])\n )\n today_assigned_courses: list = db_cursor.fetchall()\n # se formatea la lista de cursos\n today_assigned_courses = scad_utils.rowToDict(\n (\n \"AsignacionCursoID\",\n \"CursoNombre\",\n \"HoraInicio\",\n \"HoraFin\",\n \"Pabellon\",\n \"Numero\",\n ),\n today_assigned_courses,\n )\n if len(today_assigned_courses) > 0:\n existence_check_query: str = (\n \"select * from Marcacion \" \"where Fecha=? 
and AsignacionCursoID=?\"\n )\n for course in today_assigned_courses:\n db_cursor.execute(\n existence_check_query,\n (\n current_datetime.strftime(\"%Y/%m/%d\"),\n course[\"AsignacionCursoID\"],\n ),\n )\n if len(db_cursor.fetchall()) > 0:\n course[\"state\"] = \"marked\"\n else:\n if current_datetime >= scad_utils.timeToDatetime(\n course[\"HoraInicio\"], current_datetime\n ):\n if (\n current_datetime\n - scad_utils.timeToDatetime(\n course[\"HoraInicio\"], current_datetime\n )\n <= teacher_time_tolerance\n ):\n course[\"state\"] = \"mark_now\"\n else:\n course[\"state\"] = \"not_marked\"\n else:\n course[\"state\"] = \"waiting\"\n\n db_cursor.close()\n db_connection.close()\n return jsonify(today_assigned_courses)\n\n elif session[\"account_type\"] == \"Administrador\":\n # el administrador no deberia usar este servicio\n return make_response(\"ya nos jakiaron\", 400)\n\n\[email protected](\"/teacher_mark\", methods=[\"POST\"])\ndef teacherMark() -> dict:\n # validar si es posible marcar el registro del curso\n if \"account_type\" not in session:\n # no inicio sesion\n return make_response(\"stap\", 401)\n elif session[\"account_type\"] == \"Docente\":\n if testing:\n current_datetime = fake_datetime\n else:\n current_datetime = datetime.datetime.now()\n # consultar si hay algun curso para marcar\n course_to_mark: dict\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor(named_tuple=True)\n db_cursor.execute(\"SET lc_time_names = 'es_PE'\")\n query: str = (\n \"select AsignacionCursoID,SalonID \"\n \"from AsignacionCurso \"\n \"where DocenteDNI=? \"\n \"and Dia=dayname(?) \"\n \"and HoraInicio <=? \"\n \"and timediff(?,HoraInicio)<=?;\"\n )\n db_cursor.execute(\n query,\n (\n session[\"DocenteDNI\"],\n current_datetime.strftime(\"%Y/%m/%d\"),\n current_datetime.strftime(\"%H:%M:%S\"),\n current_datetime.strftime(\"%H:%M:%S\"),\n str(teacher_time_tolerance),\n ),\n )\n course_to_mark = db_cursor.fetchall()\n if len(course_to_mark) == 1:\n insertion_query: str = (\"insert into Marcacion() \" \"values(?,?,?,?);\")\n\n db_cursor.execute(\n insertion_query,\n (\n int(course_to_mark[0].AsignacionCursoID),\n current_datetime.strftime(\"%Y/%m/%d\"),\n current_datetime.strftime(\"%H:%M:%S\"),\n int(course_to_mark[0].SalonID),\n ),\n )\n db_cursor.close()\n db_connection.close()\n return make_response(\"se marco la asistencia\", 200)\n else:\n db_cursor.close()\n db_connection.close()\n return make_response(\"ya es tarde\", 406)\n\n elif session[\"account_type\"] == \"Administrador\":\n return make_response(\n \"papu, si ya nos jakiaste por lo menos usa los servicios correctos no?\", 400\n )\n\n\[email protected](\"/admin_get_report\", methods=[\"GET\"])\ndef adminGetReport() -> list:\n if \"account_type\" not in session:\n # no inicio sesion\n return make_response(\"nope\", 401)\n elif session[\"account_type\"] == \"Administrador\":\n time_range = request.get_json()[\"time_range\"]\n if testing:\n current_datetime = fake_datetime\n else:\n current_datetime = datetime.datetime.now()\n\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor(named_tuple=True)\n db_cursor.execute(\"SET lc_time_names = 'es_PE'\")\n report: list\n if time_range == \"today\":\n query: str = (\n \"select a.AsignacionCursoID,d.DocenteDNI,d.Nombre,d.Apellido, \"\n \"a.CursoNombre, a.HoraInicio, a.HoraFin, s.Pabellon, s.Numero \"\n \"from AsignacionCurso a \"\n \"inner join Salon s using(SalonID) \"\n \"inner join Docente d using(DocenteDNI) \"\n \"where Dia=dayname(?) 
and a.HoraInicio<? \"\n )\n db_cursor.execute(\n query,\n (\n current_datetime.strftime(\"%Y-%m-%d\"),\n current_datetime.strftime(\"%H:%M:%S\"),\n ),\n )\n report = db_cursor.fetchall()\n # se formatea la lista de cursos\n report = scad_utils.rowToDict(\n (\n \"AsignacionCursoID\",\n \"DocenteDNI\",\n \"Nombre\",\n \"Apellido\",\n \"CursoNombre\",\n \"HoraInicio\",\n \"HoraFin\",\n \"Pabellon\",\n \"Numero\",\n ),\n report,\n )\n if len(report) > 0:\n existence_check_query: str = (\n \"select * from Marcacion \" \"where Fecha=? and AsignacionCursoID=?\"\n )\n for assignment in report:\n db_cursor.execute(\n existence_check_query,\n (\n current_datetime.strftime(\"%Y-%m-%d\"),\n assignment[\"AsignacionCursoID\"],\n ),\n )\n if len(db_cursor.fetchall()) > 0:\n assignment[\"state\"] = \"marked\"\n else:\n assignment[\"state\"] = \"not_marked\"\n\n db_cursor.close()\n db_connection.close()\n return make_response(jsonify(report), 200)\n elif time_range == \"yesterday\":\n query: str = (\n \"select a.AsignacionCursoID,d.DocenteDNI,d.Nombre,d.Apellido, \"\n \"a.CursoNombre, a.HoraInicio, a.HoraFin, s.Pabellon, s.Numero \"\n \"from AsignacionCurso a \"\n \"inner join Salon s using(SalonID) \"\n \"inner join Docente d using(DocenteDNI) \"\n \"where Dia=dayname(?)\"\n )\n current_datetime -= datetime.timedelta(days=1)\n db_cursor.execute(\n query, (current_datetime.strftime(\"%Y-%m-%d\"),),\n )\n report = db_cursor.fetchall()\n # se formatea la lista de cursos\n report = scad_utils.rowToDict(\n (\n \"AsignacionCursoID\",\n \"DocenteDNI\",\n \"Nombre\",\n \"Apellido\",\n \"CursoNombre\",\n \"HoraInicio\",\n \"HoraFin\",\n \"Pabellon\",\n \"Numero\",\n ),\n report,\n )\n if len(report) > 0:\n existence_check_query: str = (\n \"select * from Marcacion \" \"where Fecha=? 
and AsignacionCursoID=?\"\n )\n for assignment in report:\n db_cursor.execute(\n existence_check_query,\n (\n current_datetime.strftime(\"%Y-%m-%d\"),\n assignment[\"AsignacionCursoID\"],\n ),\n )\n if len(db_cursor.fetchall()) > 0:\n assignment[\"state\"] = \"marked\"\n else:\n assignment[\"state\"] = \"not_marked\"\n db_cursor.close()\n db_connection.close()\n return make_response(jsonify(report), 200)\n elif time_range == \"this_week\":\n pass\n elif time_range == \"this_month\":\n pass\n elif time_range == \"all\":\n pass\n else:\n return make_response(\"peticion invalida\", 406)\n elif session[\"account_type\"] == \"Docente\":\n # el administrador no deberia usar este servicio\n return make_response(\"ya nos jakiaron\", 400)\n\n\[email protected](\"/admin_add_teacher\", methods=[\"POST\"])\ndef adminAddTeacher() -> dict:\n if \"account_type\" not in session:\n return make_response(\"\", 401)\n elif session[\"account_type\"] == \"Administrador\":\n data = request.get_json()\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor()\n\n query: str = (\"insert into Docente() values(?,?,?,?,?)\")\n db_cursor.execute(\n query,\n (\n data[\"DocenteDNI\"],\n data[\"Nombre\"],\n data[\"Apellido\"],\n data[\"Usuario\"],\n data[\"Contrasena\"],\n ),\n )\n db_cursor.close()\n db_connection.close()\n return make_response(\"se agrego la entrada\", 200)\n elif session[\"account_type\"] == \"Docente\":\n return make_response(\"\", 401)\n\n\[email protected](\"/admin_get_teacher_table\", methods=[\"GET\"])\ndef adminGetTeacherTable() -> dict:\n if \"account_type\" not in session:\n return make_response(\"\", 401)\n elif session[\"account_type\"] == \"Administrador\":\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor()\n\n query: str = (\"select * from Docente\")\n db_cursor.execute(query)\n teacher_table = scad_utils.rowToDict(\n (\"DocenteDNI\", \"Nombre\", \"Apellido\", \"Usuario\", \"Contrasena\"),\n db_cursor.fetchall(),\n )\n db_cursor.close()\n db_connection.close()\n return make_response(jsonify(teacher_table), 200)\n elif session[\"account_type\"] == \"Docente\":\n return make_response(\"\", 401)\n\n\[email protected](\"/admin_get_course_table\", methods=[\"GET\"])\ndef adminGetCourseTable() -> dict:\n if \"account_type\" not in session:\n return make_response(\"\", 401)\n elif session[\"account_type\"] == \"Administrador\":\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor()\n\n query: str = (\"select * from Curso\")\n db_cursor.execute(query)\n course_table = scad_utils.rowToDict(\n (\"CursoNombre\", \"FechaInicio\", \"FechaFin\"), db_cursor.fetchall(),\n )\n for course in course_table:\n course[\"FechaInicio\"] = course[\"FechaInicio\"].isoformat()\n course[\"FechaFin\"] = course[\"FechaFin\"].isoformat()\n\n db_cursor.close()\n db_connection.close()\n return make_response(jsonify(course_table), 200)\n elif session[\"account_type\"] == \"Docente\":\n return make_response(\"\", 401)\n\n\[email protected](\"/admin_get_classroom_table\", methods=[\"GET\"])\ndef adminGetClassroomTable() -> dict:\n if \"account_type\" not in session:\n return make_response(\"\", 401)\n elif session[\"account_type\"] == \"Administrador\":\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor()\n\n query: str = (\"select Pabellon,Numero from Salon\")\n db_cursor.execute(query)\n classroom_table = scad_utils.rowToDict(\n (\"Pabellon\", \"Numero\"), db_cursor.fetchall(),\n )\n db_cursor.close()\n db_connection.close()\n return 
make_response(jsonify(classroom_table), 200)\n elif session[\"account_type\"] == \"Docente\":\n return make_response(\"\", 401)\n\n\[email protected](\"/admin_get_course_assignment_table\", methods=[\"GET\"])\ndef adminGetCourseAssignmentTable() -> dict:\n if \"account_type\" not in session:\n return make_response(\"\", 401)\n elif session[\"account_type\"] == \"Administrador\":\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor()\n\n query: str = (\n \"select d.DocenteDNI, d.Nombre, d.Apellido,\"\n \"a.CursoNombre, s.Pabellon,s.Numero, a.HoraInicio, a.HoraFin,a.Dia \"\n \"from AsignacionCurso a \"\n \"inner join Salon s using(SalonID) \"\n \"inner join Docente d using(DocenteDNI)\"\n )\n db_cursor.execute(query)\n course_assignment_table = scad_utils.rowToDict(\n (\n \"DocenteDNI\",\n \"Nombre\",\n \"Apellido\",\n \"CursoNombre\",\n \"Pabellon\",\n \"Numero\",\n \"HoraInicio\",\n \"HoraFin\",\n \"Dia\",\n ),\n db_cursor.fetchall(),\n )\n\n db_cursor.close()\n db_connection.close()\n return make_response(jsonify(course_assignment_table), 200)\n elif session[\"account_type\"] == \"Docente\":\n return make_response(\"\", 401)\n\n\[email protected](\"/logout\", methods=[\"DELETE\"])\ndef logout() -> dict:\n if \"account_type\" not in session:\n return make_response(\"primero inicia session broz\", 301)\n else:\n if session[\"account_type\"] == \"Docente\":\n session.pop(\"Usuario\")\n session.pop(\"Nombre\")\n session.pop(\"Apellido\")\n return make_response(\"hasta luego prosor\", 200)\n elif session[\"account_type\"] == \"Administrador\":\n session.pop(\"Usuario\")\n return make_response(\"espero haberle sido util, hasta luego\", 200)\n return make_response(\"espero haberle sido util, hasta luego\", 200)\n return make_response(\"espero haberle sido util, hasta luego\", 200)\n return make_response(\"espero haberle sido util, hasta luego\", 200)\n return make_response(\"espero haberle sido util, hasta luego\", 200)\n return make_response(\"espero haberle sido util, hasta luego\", 200)\n return make_response(\"espero haberle sido util, hasta luego\", 200)\n return make_response(\"espero haberle sido util, hasta luego\", 200)\n",
"step-ids": [
10,
11,
12,
14,
16
]
}
|
[
10,
11,
12,
14,
16
] |
from django.db import models
# Create your models here.
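# Hierarchy: a Project contains Facilities, and each Facility contains Zones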
class Project(models.Model):
project_id = models.IntegerField(primary_key=True)
project_name = models.CharField(max_length=50)
project_description = models.CharField(max_length=200, blank=True, null=True)
project_address = models.CharField(max_length=100, blank=True, null=True)
project_city = models.CharField(max_length=50, blank=True, null=True)
project_pincode = models.CharField(max_length=10, blank=True, null=True)
project_status = models.CharField(max_length=10, blank=True, null=True)
class Meta:
db_table = 'project'
managed = True
class Facility(models.Model):
facility_id = models.IntegerField(primary_key=True)
facility_name = models.CharField(max_length=50)
facility_description = models.CharField(max_length=100, blank=True, null=True)
project = models.ForeignKey('Project', models.DO_NOTHING, null=True)
locked_for_edit = models.BooleanField(blank=True, null=True)
class Meta:
db_table = 'facility'
managed = True
class Zone(models.Model):
zone_id = models.AutoField(primary_key=True)
zone_name = models.CharField(max_length=20)
zone_description = models.CharField(max_length=100, blank=True, null=True)
facility = models.ForeignKey(Facility, models.DO_NOTHING, null=True)
class Meta:
db_table = 'zone'
managed = True
|
normal
|
{
"blob_id": "2783fc24806c323ab4ac44fbac55eef73142ab80",
"index": 7710,
"step-1": "<mask token>\n\n\nclass Facility(models.Model):\n facility_id = models.IntegerField(primary_key=True)\n facility_name = models.CharField(max_length=50)\n facility_description = models.CharField(max_length=100, blank=True,\n null=True)\n project = models.ForeignKey('Project', models.DO_NOTHING, null=True)\n locked_for_edit = models.BooleanField(blank=True, null=True)\n\n\n class Meta:\n db_table = 'facility'\n managed = True\n\n\nclass Zone(models.Model):\n zone_id = models.AutoField(primary_key=True)\n zone_name = models.CharField(max_length=20)\n zone_description = models.CharField(max_length=100, blank=True, null=True)\n facility = models.ForeignKey(Facility, models.DO_NOTHING, null=True)\n\n\n class Meta:\n db_table = 'zone'\n managed = True\n",
"step-2": "<mask token>\n\n\nclass Project(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n db_table = 'project'\n managed = True\n\n\nclass Facility(models.Model):\n facility_id = models.IntegerField(primary_key=True)\n facility_name = models.CharField(max_length=50)\n facility_description = models.CharField(max_length=100, blank=True,\n null=True)\n project = models.ForeignKey('Project', models.DO_NOTHING, null=True)\n locked_for_edit = models.BooleanField(blank=True, null=True)\n\n\n class Meta:\n db_table = 'facility'\n managed = True\n\n\nclass Zone(models.Model):\n zone_id = models.AutoField(primary_key=True)\n zone_name = models.CharField(max_length=20)\n zone_description = models.CharField(max_length=100, blank=True, null=True)\n facility = models.ForeignKey(Facility, models.DO_NOTHING, null=True)\n\n\n class Meta:\n db_table = 'zone'\n managed = True\n",
"step-3": "<mask token>\n\n\nclass Project(models.Model):\n project_id = models.IntegerField(primary_key=True)\n project_name = models.CharField(max_length=50)\n project_description = models.CharField(max_length=200, blank=True, null\n =True)\n project_address = models.CharField(max_length=100, blank=True, null=True)\n project_city = models.CharField(max_length=50, blank=True, null=True)\n project_pincode = models.CharField(max_length=10, blank=True, null=True)\n project_status = models.CharField(max_length=10, blank=True, null=True)\n\n\n class Meta:\n db_table = 'project'\n managed = True\n\n\nclass Facility(models.Model):\n facility_id = models.IntegerField(primary_key=True)\n facility_name = models.CharField(max_length=50)\n facility_description = models.CharField(max_length=100, blank=True,\n null=True)\n project = models.ForeignKey('Project', models.DO_NOTHING, null=True)\n locked_for_edit = models.BooleanField(blank=True, null=True)\n\n\n class Meta:\n db_table = 'facility'\n managed = True\n\n\nclass Zone(models.Model):\n zone_id = models.AutoField(primary_key=True)\n zone_name = models.CharField(max_length=20)\n zone_description = models.CharField(max_length=100, blank=True, null=True)\n facility = models.ForeignKey(Facility, models.DO_NOTHING, null=True)\n\n\n class Meta:\n db_table = 'zone'\n managed = True\n",
"step-4": "from django.db import models\nfrom django.db import models\n\n\nclass Project(models.Model):\n project_id = models.IntegerField(primary_key=True)\n project_name = models.CharField(max_length=50)\n project_description = models.CharField(max_length=200, blank=True, null\n =True)\n project_address = models.CharField(max_length=100, blank=True, null=True)\n project_city = models.CharField(max_length=50, blank=True, null=True)\n project_pincode = models.CharField(max_length=10, blank=True, null=True)\n project_status = models.CharField(max_length=10, blank=True, null=True)\n\n\n class Meta:\n db_table = 'project'\n managed = True\n\n\nclass Facility(models.Model):\n facility_id = models.IntegerField(primary_key=True)\n facility_name = models.CharField(max_length=50)\n facility_description = models.CharField(max_length=100, blank=True,\n null=True)\n project = models.ForeignKey('Project', models.DO_NOTHING, null=True)\n locked_for_edit = models.BooleanField(blank=True, null=True)\n\n\n class Meta:\n db_table = 'facility'\n managed = True\n\n\nclass Zone(models.Model):\n zone_id = models.AutoField(primary_key=True)\n zone_name = models.CharField(max_length=20)\n zone_description = models.CharField(max_length=100, blank=True, null=True)\n facility = models.ForeignKey(Facility, models.DO_NOTHING, null=True)\n\n\n class Meta:\n db_table = 'zone'\n managed = True\n",
"step-5": "from django.db import models\n\n# Create your models here.\nfrom django.db import models\n\n# Create your models here.\nclass Project(models.Model):\n project_id = models.IntegerField(primary_key=True)\n project_name = models.CharField(max_length=50)\n project_description = models.CharField(max_length=200, blank=True, null=True)\n project_address = models.CharField(max_length=100, blank=True, null=True)\n project_city = models.CharField(max_length=50, blank=True, null=True)\n project_pincode = models.CharField(max_length=10, blank=True, null=True)\n project_status = models.CharField(max_length=10, blank=True, null=True)\n\n class Meta:\n db_table = 'project'\n managed = True\n\n\n\nclass Facility(models.Model):\n facility_id = models.IntegerField(primary_key=True)\n facility_name = models.CharField(max_length=50)\n facility_description = models.CharField(max_length=100, blank=True, null=True)\n project = models.ForeignKey('Project', models.DO_NOTHING, null=True)\n locked_for_edit = models.BooleanField(blank=True, null=True)\n\n class Meta:\n db_table = 'facility'\n managed = True\n\n\n\nclass Zone(models.Model):\n zone_id = models.AutoField(primary_key=True)\n zone_name = models.CharField(max_length=20)\n zone_description = models.CharField(max_length=100, blank=True, null=True)\n facility = models.ForeignKey(Facility, models.DO_NOTHING, null=True)\n\n class Meta:\n db_table = 'zone'\n managed = True\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(
""" <html>
<body>
<p>Generated {0}</p>
</body>
</html>"""
.format(datetime.now()))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from datetime import datetime
print(
""" <html>
<body>
<p>Generated {0}</p>
</body>
</html>"""
.format(datetime.now()))
<|reserved_special_token_1|>
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""""""""""""""""""""""""""""""""""""""""""""""
" Filename: time.py
"
" Author: xss - [email protected]
" Description: Show local time
" Create: 2018-07-02 20:20:17
"""""""""""""""""""""""""""""""""""""""""""""""
from datetime import datetime
print('''\
<html>
<body>
<p>Generated {0}</p>
</body>
</html>'''.format(datetime.now()))
|
flexible
|
{
"blob_id": "e8eac1e4433eee769d317de9ba81d5181168fdca",
"index": 6293,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(\n \"\"\" <html>\n <body>\n <p>Generated {0}</p>\n </body>\n </html>\"\"\"\n .format(datetime.now()))\n",
"step-3": "<mask token>\nfrom datetime import datetime\nprint(\n \"\"\" <html>\n <body>\n <p>Generated {0}</p>\n </body>\n </html>\"\"\"\n .format(datetime.now()))\n",
"step-4": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\" Filename: time.py\n\"\n\" Author: xss - [email protected]\n\" Description: Show local time\n\" Create: 2018-07-02 20:20:17\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\nfrom datetime import datetime\n\n\nprint('''\\\n <html>\n <body>\n <p>Generated {0}</p>\n </body>\n </html>'''.format(datetime.now()))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python3
'''
Generate tree search dot file
'''
import copy
# Colors supported by graphviz, in some pleasing order
colors = {
"fa": "brown",
"fb": "brown1",
"ea": "cadetblue",
"eb": "cadetblue1",
"pa": "orange",
"pb": "orange4"
}
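# Global counter used to assign a unique id to each generated search-tree node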
curId = 1
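# Capacities of the two containers (A and B) and the goal amount to measure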
capAset = 4
capBset = 7
goal = 2
def export_dot():
# helper functions
def getColor(node):
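        # States where either container holds the goal amount are drawn in red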
(a, b) = node["state"]
if a == goal or b == goal:
return "red"
return "black"
def getLabel(node):
if node["leaf"]:
return "{} \n cost:{}".format(node["state"], node["cost"])
else:
return node["state"]
print """digraph searchTree {
size = "8,8";
node [ shape=oval, style=filled, fillcolor=lightblue2 ] ;
edge [fontname="Helvetica"];
splines=curved;
"""
(nodes, edges) = getGraph()
for n in nodes:
print "{} [label=\"{}\", color={}, penwidth=2];".format(
n["id"], getLabel(n), getColor(n))
for (x, y, action) in edges:
print "{} -> {} [xlabel=\"{}\",color={}]".format(
x, y, action, colors[action])
print "}"
def getGraph():
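    # Build node and edge lists from the BFS tree; nodes without children are marked as leaves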
tree = bfs(capAset, capBset, goal)
nodes = [v for k, v in tree.items()]
edges = [(n["parent"], n["id"], n["action"]) for n in nodes
if n["parent"] != -1]
hasChild = set()
for node in nodes:
hasChild.add(node["parent"])
for node in nodes:
if node["id"] in hasChild:
node["leaf"] = False
else:
node["leaf"] = True
return (nodes, edges)
def bfs(capA, capB, goal):
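    # Breadth-first search over (amountA, amountB) states, stopping once a container holds the goal amount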
#helper functions
def fillA(state):
(a, b) = state["state"]
global curId
curId += 1
ans = {
"state": (capA, b),
"cost": state["cost"] + capA - a,
"id": curId,
"parent": state["id"],
"action": "fa",
"visited": copy.deepcopy(state["visited"])
}
return ans
def fillB(state):
(a, b) = state["state"]
global curId
curId += 1
ans = {
"state": (a, capB),
"cost": state["cost"] + capB - b,
"id": curId,
"parent": state["id"],
"action": "fb",
"visited": copy.deepcopy(state["visited"])
}
return ans
def emptyA(state):
(a, b) = state["state"]
global curId
curId += 1
ans = {
"state": (0, b),
"cost": state["cost"],
"id": curId,
"parent": state["id"],
"action": "ea",
"visited": copy.deepcopy(state["visited"])
}
return ans
def emptyB(state):
(a, b) = state["state"]
global curId
curId += 1
ans = {
"state": (a, 0),
"cost": state["cost"],
"id": curId,
"parent": state["id"],
"action": "eb",
"visited": copy.deepcopy(state["visited"])
}
return ans
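    # Pouring transfers contents until the source container is empty or the destination is full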
def pourA(state):
(a, b) = state["state"]
global curId
curId += 1
ans = {
"state": (lambda x, y: (x + y - capB, capB)
if x + y > capB else (0, x + y))
(a, b),
"cost": state["cost"],
"id": curId,
"parent": state["id"],
"action": "pa",
"visited": copy.deepcopy(state["visited"])
}
return ans
def pourB(state):
(a, b) = state["state"]
global curId
curId += 1
ans = {
"state": (lambda x, y: (capA, x + y - capA)
if x + y > capA else (x + y, 0))
(a, b),
"cost": state["cost"],
"id": curId,
"parent": state["id"],
"action": "pb",
"visited": copy.deepcopy(state["visited"])
}
return ans
initState = {
"state": (0, 0),
"cost": 0,
"id": 0,
"parent": -1,
"action": "Nothing",
"visited": set()
}
queue = []
queue.append(initState)
tree = dict()
while queue:
state = queue.pop(0)
(a, b) = state["state"]
#check if visited
if ((a, b) == (0, 0) and curId != 1) or (a, b) == (capA, capB):
continue
if (a, b) in state["visited"]:
#tree[state["id"]] = state
continue
if a == goal or b == goal:
tree[state["id"]] = state
break
else:
tree[state["id"]] = state
state["visited"].add((a, b))
# fill A
if a != capA:
queue.append(fillA(state))
# fill B
if b != capB:
queue.append(fillB(state))
# empty A
if a > 0:
queue.append(emptyA(state))
# empty B
if b > 0:
queue.append(emptyB(state))
# pour A to B
if a > 0 and b != capB:
queue.append(pourA(state))
# pour B to A
if b > 0 and a != capA:
queue.append(pourB(state))
return tree
def main():
export_dot()
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "599da0f045ab5c2b3f568def3d89452b56cac029",
"index": 9083,
"step-1": "#!/usr/bin/env python\n\n'''\n Generate tree search dot file\n'''\nimport copy\n\n# Colors supported by graphviz, in some pleasing order\ncolors = {\n \"fa\": \"brown\",\n \"fb\": \"brown1\",\n \"ea\": \"cadetblue\",\n \"eb\": \"cadetblue1\",\n \"pa\": \"orange\",\n \"pb\": \"orange4\"\n}\n\ncurId = 1\ncapAset = 4\ncapBset = 7\ngoal = 2\n\n\ndef export_dot():\n # helper functions\n def getColor(node):\n (a, b) = node[\"state\"]\n if a == goal or b == goal:\n return \"red\"\n return \"black\"\n\n def getLabel(node):\n if node[\"leaf\"]:\n return \"{} \\n cost:{}\".format(node[\"state\"], node[\"cost\"])\n else:\n return node[\"state\"]\n\n print \"\"\"digraph searchTree {\n size = \"8,8\";\n node [ shape=oval, style=filled, fillcolor=lightblue2 ] ;\n edge [fontname=\"Helvetica\"];\n splines=curved;\n\"\"\"\n (nodes, edges) = getGraph()\n for n in nodes:\n print \"{} [label=\\\"{}\\\", color={}, penwidth=2];\".format(\n n[\"id\"], getLabel(n), getColor(n))\n for (x, y, action) in edges:\n print \"{} -> {} [xlabel=\\\"{}\\\",color={}]\".format(\n x, y, action, colors[action])\n print \"}\"\n\n\ndef getGraph():\n tree = bfs(capAset, capBset, goal)\n nodes = [v for k, v in tree.items()]\n edges = [(n[\"parent\"], n[\"id\"], n[\"action\"]) for n in nodes\n if n[\"parent\"] != -1]\n hasChild = set()\n for node in nodes:\n hasChild.add(node[\"parent\"])\n for node in nodes:\n if node[\"id\"] in hasChild:\n node[\"leaf\"] = False\n else:\n node[\"leaf\"] = True\n return (nodes, edges)\n\n\ndef bfs(capA, capB, goal):\n #helper functions\n def fillA(state):\n (a, b) = state[\"state\"]\n global curId\n curId += 1\n ans = {\n \"state\": (capA, b),\n \"cost\": state[\"cost\"] + capA - a,\n \"id\": curId,\n \"parent\": state[\"id\"],\n \"action\": \"fa\",\n \"visited\": copy.deepcopy(state[\"visited\"])\n }\n return ans\n\n def fillB(state):\n (a, b) = state[\"state\"]\n global curId\n curId += 1\n ans = {\n \"state\": (a, capB),\n \"cost\": state[\"cost\"] + capB - b,\n \"id\": curId,\n \"parent\": state[\"id\"],\n \"action\": \"fb\",\n \"visited\": copy.deepcopy(state[\"visited\"])\n }\n return ans\n\n def emptyA(state):\n (a, b) = state[\"state\"]\n global curId\n curId += 1\n ans = {\n \"state\": (0, b),\n \"cost\": state[\"cost\"],\n \"id\": curId,\n \"parent\": state[\"id\"],\n \"action\": \"ea\",\n \"visited\": copy.deepcopy(state[\"visited\"])\n }\n return ans\n\n def emptyB(state):\n (a, b) = state[\"state\"]\n global curId\n curId += 1\n ans = {\n \"state\": (a, 0),\n \"cost\": state[\"cost\"],\n \"id\": curId,\n \"parent\": state[\"id\"],\n \"action\": \"eb\",\n \"visited\": copy.deepcopy(state[\"visited\"])\n }\n return ans\n\n def pourA(state):\n (a, b) = state[\"state\"]\n global curId\n curId += 1\n ans = {\n \"state\": (lambda x, y: (x + y - capB, capB)\n if x + y > capB else (0, x + y))\n (a, b),\n \"cost\": state[\"cost\"],\n \"id\": curId,\n \"parent\": state[\"id\"],\n \"action\": \"pa\",\n \"visited\": copy.deepcopy(state[\"visited\"])\n }\n return ans\n\n def pourB(state):\n (a, b) = state[\"state\"]\n global curId\n curId += 1\n ans = {\n \"state\": (lambda x, y: (capA, x + y - capA)\n if x + y > capA else (x + y, 0))\n (a, b),\n \"cost\": state[\"cost\"],\n \"id\": curId,\n \"parent\": state[\"id\"],\n \"action\": \"pb\",\n \"visited\": copy.deepcopy(state[\"visited\"])\n }\n return ans\n\n initState = {\n \"state\": (0, 0),\n \"cost\": 0,\n \"id\": 0,\n \"parent\": -1,\n \"action\": \"Nothing\",\n \"visited\": set()\n }\n queue = []\n queue.append(initState)\n tree = 
dict()\n\n while queue:\n state = queue.pop(0)\n (a, b) = state[\"state\"]\n #check if visited\n if ((a, b) == (0, 0) and curId != 1) or (a, b) == (capA, capB):\n continue\n if (a, b) in state[\"visited\"]:\n #tree[state[\"id\"]] = state\n continue\n\n if a == goal or b == goal:\n tree[state[\"id\"]] = state\n break\n else:\n tree[state[\"id\"]] = state\n state[\"visited\"].add((a, b))\n # fill A\n if a != capA:\n queue.append(fillA(state))\n # fill B\n if b != capB:\n queue.append(fillB(state))\n # empty A\n if a > 0:\n queue.append(emptyA(state))\n # empty B\n if b > 0:\n queue.append(emptyB(state))\n # pour A to B\n if a > 0 and b != capB:\n queue.append(pourA(state))\n # pour B to A\n if b > 0 and a != capA:\n queue.append(pourB(state))\n\n return tree\n\n\ndef main():\n export_dot()\n\nif __name__ == '__main__':\n main()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def game_manager(info_list):
dictionary = {}
for piece_info in info_list:
piece_info = piece_info.split('||')
piece_info[2] = int(piece_info[2])
if piece_info[2] not in dictionary:
dictionary[piece_info[2]] = {(piece_info[1], piece_info[0])}
dictionary[piece_info[2]].add((piece_info[1], piece_info[0]))
print(dictionary)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def game_manager(info_list):
dictionary = {}
for piece_info in info_list:
piece_info = piece_info.split('||')
piece_info[2] = int(piece_info[2])
if piece_info[2] not in dictionary:
dictionary[piece_info[2]] = {(piece_info[1], piece_info[0])}
dictionary[piece_info[2]].add((piece_info[1], piece_info[0]))
print(dictionary)
<|reserved_special_token_0|>
game_manager(info)
<|reserved_special_token_1|>
def game_manager(info_list):
dictionary = {}
for piece_info in info_list:
piece_info = piece_info.split('||')
piece_info[2] = int(piece_info[2])
if piece_info[2] not in dictionary:
dictionary[piece_info[2]] = {(piece_info[1], piece_info[0])}
dictionary[piece_info[2]].add((piece_info[1], piece_info[0]))
print(dictionary)
info = ['Final Fantasy VII||SCEA||1997',
'Mirror’s Edge||Electronic Arts||2008', 'GTA 4||Rockstar Games||2008',
'Grandia||SCEA||1997', 'Half Life 2||Valve||2004']
game_manager(info)
<|reserved_special_token_1|>
def game_manager(info_list):
dictionary = {}
for piece_info in info_list:
piece_info = piece_info.split('||')
piece_info[2] = int(piece_info[2])
if piece_info[2] not in dictionary:
dictionary[piece_info[2]] = {(piece_info[1],piece_info[0])}
dictionary[piece_info[2]].add((piece_info[1],piece_info[0]))
print(dictionary)
info = ['Final Fantasy VII||SCEA||1997','Mirror’s Edge||Electronic Arts||2008','GTA 4||Rockstar Games||2008','Grandia||SCEA||1997', \
'Half Life 2||Valve||2004']
game_manager(info)
|
flexible
|
{
"blob_id": "a382edb861a43ac3065a781ea996a8d1dd819954",
"index": 6649,
"step-1": "<mask token>\n",
"step-2": "def game_manager(info_list):\n dictionary = {}\n for piece_info in info_list:\n piece_info = piece_info.split('||')\n piece_info[2] = int(piece_info[2])\n if piece_info[2] not in dictionary:\n dictionary[piece_info[2]] = {(piece_info[1], piece_info[0])}\n dictionary[piece_info[2]].add((piece_info[1], piece_info[0]))\n print(dictionary)\n\n\n<mask token>\n",
"step-3": "def game_manager(info_list):\n dictionary = {}\n for piece_info in info_list:\n piece_info = piece_info.split('||')\n piece_info[2] = int(piece_info[2])\n if piece_info[2] not in dictionary:\n dictionary[piece_info[2]] = {(piece_info[1], piece_info[0])}\n dictionary[piece_info[2]].add((piece_info[1], piece_info[0]))\n print(dictionary)\n\n\n<mask token>\ngame_manager(info)\n",
"step-4": "def game_manager(info_list):\n dictionary = {}\n for piece_info in info_list:\n piece_info = piece_info.split('||')\n piece_info[2] = int(piece_info[2])\n if piece_info[2] not in dictionary:\n dictionary[piece_info[2]] = {(piece_info[1], piece_info[0])}\n dictionary[piece_info[2]].add((piece_info[1], piece_info[0]))\n print(dictionary)\n\n\ninfo = ['Final Fantasy VII||SCEA||1997',\n 'Mirror’s Edge||Electronic Arts||2008', 'GTA 4||Rockstar Games||2008',\n 'Grandia||SCEA||1997', 'Half Life 2||Valve||2004']\ngame_manager(info)\n",
"step-5": "def game_manager(info_list):\n dictionary = {}\n for piece_info in info_list:\n piece_info = piece_info.split('||')\n piece_info[2] = int(piece_info[2])\n if piece_info[2] not in dictionary:\n dictionary[piece_info[2]] = {(piece_info[1],piece_info[0])}\n dictionary[piece_info[2]].add((piece_info[1],piece_info[0]))\n print(dictionary)\n\n\ninfo = ['Final Fantasy VII||SCEA||1997','Mirror’s Edge||Electronic Arts||2008','GTA 4||Rockstar Games||2008','Grandia||SCEA||1997', \\\n'Half Life 2||Valve||2004']\n\ngame_manager(info)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
@listen_to('(.*)')
def receive_question(message, question_string):
if message._body['channel'] == SLACK_CHANNEL:
try:
query_ccjieba = ccjieba.cut(question_string.strip())
query_unigram = unigram.cut(question_string.strip())
results = post_multifield_query(client, index='post',
query_ccjieba=concat_tokens(query_ccjieba, pos=False),
query_unigram=concat_tokens(query_unigram, pos=False), top=
top_title)
ans = avg_pmi(query_unigram, results, pairs_cnt,
total_pairs_cnt, tokenizer='unigram')
ans_string = '\n'.join(['<{:.3f}> <title:{}> comment: {}'.
format(score, title, comment) for score, comment, title in
ans[:top_response]])
message.send(ans_string)
except Exception as err:
print(err)
def main():
bot = Bot()
bot.run()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
config.read(package_dir + '/chatbot_apps/config.ini')
<|reserved_special_token_0|>
@listen_to('(.*)')
def receive_question(message, question_string):
if message._body['channel'] == SLACK_CHANNEL:
try:
query_ccjieba = ccjieba.cut(question_string.strip())
query_unigram = unigram.cut(question_string.strip())
results = post_multifield_query(client, index='post',
query_ccjieba=concat_tokens(query_ccjieba, pos=False),
query_unigram=concat_tokens(query_unigram, pos=False), top=
top_title)
ans = avg_pmi(query_unigram, results, pairs_cnt,
total_pairs_cnt, tokenizer='unigram')
ans_string = '\n'.join(['<{:.3f}> <title:{}> comment: {}'.
format(score, title, comment) for score, comment, title in
ans[:top_response]])
message.send(ans_string)
except Exception as err:
print(err)
def main():
bot = Bot()
bot.run()
if __name__ == '__main__':
client = connections.create_connection()
ccjieba = CCEmojiJieba()
unigram = UniGram()
t = time.time()
print('Loading unigram pmi pickle')
with open(package_dir + '/data/pmi_pickle/pmi_unigram.pickle', 'rb') as f:
pairs_cnt = dict(pickle.load(f))
total_pairs_cnt = sum(pairs_cnt.values())
print('Pickle loaded in {:.5f}s'.format(time.time() - t))
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
top_title = 100
top_response = 15
package_dir = os.path.dirname(os.path.realpath(__name__))
config = ConfigParser()
config.read(package_dir + '/chatbot_apps/config.ini')
bot.settings.API_TOKEN = config.get('slack', 'slack_token')
SLACK_CHANNEL = config.get('slack', 'slack_channel')
@listen_to('(.*)')
def receive_question(message, question_string):
if message._body['channel'] == SLACK_CHANNEL:
try:
query_ccjieba = ccjieba.cut(question_string.strip())
query_unigram = unigram.cut(question_string.strip())
results = post_multifield_query(client, index='post',
query_ccjieba=concat_tokens(query_ccjieba, pos=False),
query_unigram=concat_tokens(query_unigram, pos=False), top=
top_title)
ans = avg_pmi(query_unigram, results, pairs_cnt,
total_pairs_cnt, tokenizer='unigram')
ans_string = '\n'.join(['<{:.3f}> <title:{}> comment: {}'.
format(score, title, comment) for score, comment, title in
ans[:top_response]])
message.send(ans_string)
except Exception as err:
print(err)
def main():
bot = Bot()
bot.run()
if __name__ == '__main__':
client = connections.create_connection()
ccjieba = CCEmojiJieba()
unigram = UniGram()
t = time.time()
print('Loading unigram pmi pickle')
with open(package_dir + '/data/pmi_pickle/pmi_unigram.pickle', 'rb') as f:
pairs_cnt = dict(pickle.load(f))
total_pairs_cnt = sum(pairs_cnt.values())
print('Pickle loaded in {:.5f}s'.format(time.time() - t))
main()
<|reserved_special_token_1|>
import os
import time
import pickle
from configparser import ConfigParser
from slackbot import bot
from slackbot.bot import Bot
from slackbot.bot import listen_to
from elasticsearch_dsl.connections import connections
from okcom_tokenizer.tokenizers import CCEmojiJieba, UniGram
from marginalbear_elastic.query import post_multifield_query
from marginalbear_elastic.utils import concat_tokens
from marginalbear_elastic.ranking import avg_pmi
top_title = 100
top_response = 15
package_dir = os.path.dirname(os.path.realpath(__name__))
config = ConfigParser()
config.read(package_dir + '/chatbot_apps/config.ini')
bot.settings.API_TOKEN = config.get('slack', 'slack_token')
SLACK_CHANNEL = config.get('slack', 'slack_channel')
@listen_to('(.*)')
def receive_question(message, question_string):
if message._body['channel'] == SLACK_CHANNEL:
try:
query_ccjieba = ccjieba.cut(question_string.strip())
query_unigram = unigram.cut(question_string.strip())
results = post_multifield_query(client, index='post',
query_ccjieba=concat_tokens(query_ccjieba, pos=False),
query_unigram=concat_tokens(query_unigram, pos=False), top=
top_title)
ans = avg_pmi(query_unigram, results, pairs_cnt,
total_pairs_cnt, tokenizer='unigram')
ans_string = '\n'.join(['<{:.3f}> <title:{}> comment: {}'.
format(score, title, comment) for score, comment, title in
ans[:top_response]])
message.send(ans_string)
except Exception as err:
print(err)
def main():
bot = Bot()
bot.run()
if __name__ == '__main__':
client = connections.create_connection()
ccjieba = CCEmojiJieba()
unigram = UniGram()
t = time.time()
print('Loading unigram pmi pickle')
with open(package_dir + '/data/pmi_pickle/pmi_unigram.pickle', 'rb') as f:
pairs_cnt = dict(pickle.load(f))
total_pairs_cnt = sum(pairs_cnt.values())
print('Pickle loaded in {:.5f}s'.format(time.time() - t))
main()
<|reserved_special_token_1|>
import os
import time
import pickle
from configparser import ConfigParser
from slackbot import bot
from slackbot.bot import Bot
from slackbot.bot import listen_to
from elasticsearch_dsl.connections import connections
from okcom_tokenizer.tokenizers import CCEmojiJieba, UniGram
from marginalbear_elastic.query import post_multifield_query
from marginalbear_elastic.utils import concat_tokens
from marginalbear_elastic.ranking import avg_pmi
top_title = 100
top_response = 15
package_dir = os.path.dirname(os.path.realpath(__name__))
config = ConfigParser()
config.read(package_dir + '/chatbot_apps/config.ini')
bot.settings.API_TOKEN = config.get('slack', 'slack_token')
SLACK_CHANNEL = config.get('slack', 'slack_channel')
@listen_to(r'(.*)')
def receive_question(message, question_string):
if message._body['channel'] == SLACK_CHANNEL:
try:
query_ccjieba = ccjieba.cut(question_string.strip())
query_unigram = unigram.cut(question_string.strip())
results = post_multifield_query(client,
index='post',
query_ccjieba=concat_tokens(query_ccjieba, pos=False),
query_unigram=concat_tokens(query_unigram, pos=False),
top=top_title)
ans = avg_pmi(query_unigram, results, pairs_cnt, total_pairs_cnt, tokenizer='unigram')
ans_string = '\n'.join(['<{:.3f}> <title:{}> comment: {}'.format(score, title, comment) for score, comment, title in ans[:top_response]])
message.send(ans_string)
except Exception as err:
print(err)
def main():
bot = Bot()
bot.run()
if __name__ == '__main__':
client = connections.create_connection()
ccjieba = CCEmojiJieba()
unigram = UniGram()
t = time.time()
print('Loading unigram pmi pickle')
with open(package_dir + '/data/pmi_pickle/pmi_unigram.pickle', 'rb') as f:
pairs_cnt = dict(pickle.load(f))
total_pairs_cnt = sum(pairs_cnt.values())
print('Pickle loaded in {:.5f}s'.format(time.time() - t))
main()
|
flexible
|
{
"blob_id": "3630f83e7e6a10f42e96f8bd6fa9714232d9176b",
"index": 4552,
"step-1": "<mask token>\n\n\n@listen_to('(.*)')\ndef receive_question(message, question_string):\n if message._body['channel'] == SLACK_CHANNEL:\n try:\n query_ccjieba = ccjieba.cut(question_string.strip())\n query_unigram = unigram.cut(question_string.strip())\n results = post_multifield_query(client, index='post',\n query_ccjieba=concat_tokens(query_ccjieba, pos=False),\n query_unigram=concat_tokens(query_unigram, pos=False), top=\n top_title)\n ans = avg_pmi(query_unigram, results, pairs_cnt,\n total_pairs_cnt, tokenizer='unigram')\n ans_string = '\\n'.join(['<{:.3f}> <title:{}> comment: {}'.\n format(score, title, comment) for score, comment, title in\n ans[:top_response]])\n message.send(ans_string)\n except Exception as err:\n print(err)\n\n\ndef main():\n bot = Bot()\n bot.run()\n\n\n<mask token>\n",
"step-2": "<mask token>\nconfig.read(package_dir + '/chatbot_apps/config.ini')\n<mask token>\n\n\n@listen_to('(.*)')\ndef receive_question(message, question_string):\n if message._body['channel'] == SLACK_CHANNEL:\n try:\n query_ccjieba = ccjieba.cut(question_string.strip())\n query_unigram = unigram.cut(question_string.strip())\n results = post_multifield_query(client, index='post',\n query_ccjieba=concat_tokens(query_ccjieba, pos=False),\n query_unigram=concat_tokens(query_unigram, pos=False), top=\n top_title)\n ans = avg_pmi(query_unigram, results, pairs_cnt,\n total_pairs_cnt, tokenizer='unigram')\n ans_string = '\\n'.join(['<{:.3f}> <title:{}> comment: {}'.\n format(score, title, comment) for score, comment, title in\n ans[:top_response]])\n message.send(ans_string)\n except Exception as err:\n print(err)\n\n\ndef main():\n bot = Bot()\n bot.run()\n\n\nif __name__ == '__main__':\n client = connections.create_connection()\n ccjieba = CCEmojiJieba()\n unigram = UniGram()\n t = time.time()\n print('Loading unigram pmi pickle')\n with open(package_dir + '/data/pmi_pickle/pmi_unigram.pickle', 'rb') as f:\n pairs_cnt = dict(pickle.load(f))\n total_pairs_cnt = sum(pairs_cnt.values())\n print('Pickle loaded in {:.5f}s'.format(time.time() - t))\n main()\n",
"step-3": "<mask token>\ntop_title = 100\ntop_response = 15\npackage_dir = os.path.dirname(os.path.realpath(__name__))\nconfig = ConfigParser()\nconfig.read(package_dir + '/chatbot_apps/config.ini')\nbot.settings.API_TOKEN = config.get('slack', 'slack_token')\nSLACK_CHANNEL = config.get('slack', 'slack_channel')\n\n\n@listen_to('(.*)')\ndef receive_question(message, question_string):\n if message._body['channel'] == SLACK_CHANNEL:\n try:\n query_ccjieba = ccjieba.cut(question_string.strip())\n query_unigram = unigram.cut(question_string.strip())\n results = post_multifield_query(client, index='post',\n query_ccjieba=concat_tokens(query_ccjieba, pos=False),\n query_unigram=concat_tokens(query_unigram, pos=False), top=\n top_title)\n ans = avg_pmi(query_unigram, results, pairs_cnt,\n total_pairs_cnt, tokenizer='unigram')\n ans_string = '\\n'.join(['<{:.3f}> <title:{}> comment: {}'.\n format(score, title, comment) for score, comment, title in\n ans[:top_response]])\n message.send(ans_string)\n except Exception as err:\n print(err)\n\n\ndef main():\n bot = Bot()\n bot.run()\n\n\nif __name__ == '__main__':\n client = connections.create_connection()\n ccjieba = CCEmojiJieba()\n unigram = UniGram()\n t = time.time()\n print('Loading unigram pmi pickle')\n with open(package_dir + '/data/pmi_pickle/pmi_unigram.pickle', 'rb') as f:\n pairs_cnt = dict(pickle.load(f))\n total_pairs_cnt = sum(pairs_cnt.values())\n print('Pickle loaded in {:.5f}s'.format(time.time() - t))\n main()\n",
"step-4": "import os\nimport time\nimport pickle\nfrom configparser import ConfigParser\nfrom slackbot import bot\nfrom slackbot.bot import Bot\nfrom slackbot.bot import listen_to\nfrom elasticsearch_dsl.connections import connections\nfrom okcom_tokenizer.tokenizers import CCEmojiJieba, UniGram\nfrom marginalbear_elastic.query import post_multifield_query\nfrom marginalbear_elastic.utils import concat_tokens\nfrom marginalbear_elastic.ranking import avg_pmi\ntop_title = 100\ntop_response = 15\npackage_dir = os.path.dirname(os.path.realpath(__name__))\nconfig = ConfigParser()\nconfig.read(package_dir + '/chatbot_apps/config.ini')\nbot.settings.API_TOKEN = config.get('slack', 'slack_token')\nSLACK_CHANNEL = config.get('slack', 'slack_channel')\n\n\n@listen_to('(.*)')\ndef receive_question(message, question_string):\n if message._body['channel'] == SLACK_CHANNEL:\n try:\n query_ccjieba = ccjieba.cut(question_string.strip())\n query_unigram = unigram.cut(question_string.strip())\n results = post_multifield_query(client, index='post',\n query_ccjieba=concat_tokens(query_ccjieba, pos=False),\n query_unigram=concat_tokens(query_unigram, pos=False), top=\n top_title)\n ans = avg_pmi(query_unigram, results, pairs_cnt,\n total_pairs_cnt, tokenizer='unigram')\n ans_string = '\\n'.join(['<{:.3f}> <title:{}> comment: {}'.\n format(score, title, comment) for score, comment, title in\n ans[:top_response]])\n message.send(ans_string)\n except Exception as err:\n print(err)\n\n\ndef main():\n bot = Bot()\n bot.run()\n\n\nif __name__ == '__main__':\n client = connections.create_connection()\n ccjieba = CCEmojiJieba()\n unigram = UniGram()\n t = time.time()\n print('Loading unigram pmi pickle')\n with open(package_dir + '/data/pmi_pickle/pmi_unigram.pickle', 'rb') as f:\n pairs_cnt = dict(pickle.load(f))\n total_pairs_cnt = sum(pairs_cnt.values())\n print('Pickle loaded in {:.5f}s'.format(time.time() - t))\n main()\n",
"step-5": "import os\nimport time\nimport pickle\nfrom configparser import ConfigParser\n\nfrom slackbot import bot\nfrom slackbot.bot import Bot\nfrom slackbot.bot import listen_to\nfrom elasticsearch_dsl.connections import connections\n\nfrom okcom_tokenizer.tokenizers import CCEmojiJieba, UniGram\nfrom marginalbear_elastic.query import post_multifield_query\nfrom marginalbear_elastic.utils import concat_tokens\nfrom marginalbear_elastic.ranking import avg_pmi\n\n\ntop_title = 100\ntop_response = 15\n\npackage_dir = os.path.dirname(os.path.realpath(__name__))\nconfig = ConfigParser()\nconfig.read(package_dir + '/chatbot_apps/config.ini')\nbot.settings.API_TOKEN = config.get('slack', 'slack_token')\nSLACK_CHANNEL = config.get('slack', 'slack_channel')\n\n\n@listen_to(r'(.*)')\ndef receive_question(message, question_string):\n if message._body['channel'] == SLACK_CHANNEL:\n try:\n query_ccjieba = ccjieba.cut(question_string.strip())\n query_unigram = unigram.cut(question_string.strip())\n results = post_multifield_query(client,\n index='post',\n query_ccjieba=concat_tokens(query_ccjieba, pos=False),\n query_unigram=concat_tokens(query_unigram, pos=False),\n top=top_title)\n ans = avg_pmi(query_unigram, results, pairs_cnt, total_pairs_cnt, tokenizer='unigram')\n ans_string = '\\n'.join(['<{:.3f}> <title:{}> comment: {}'.format(score, title, comment) for score, comment, title in ans[:top_response]])\n message.send(ans_string)\n except Exception as err:\n print(err)\n\n\ndef main():\n bot = Bot()\n bot.run()\n\n\nif __name__ == '__main__':\n client = connections.create_connection()\n ccjieba = CCEmojiJieba()\n unigram = UniGram()\n t = time.time()\n print('Loading unigram pmi pickle')\n with open(package_dir + '/data/pmi_pickle/pmi_unigram.pickle', 'rb') as f:\n pairs_cnt = dict(pickle.load(f))\n total_pairs_cnt = sum(pairs_cnt.values())\n print('Pickle loaded in {:.5f}s'.format(time.time() - t))\n main()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class TestBloomFilter(object):
def test_setup(self):
bf = BloomFilter(1000)
assert 10 == bf._num_hashes
assert 14380 == bf._num_bits
assert 14380 == len(bf._bitarray)
assert 0 == bf._bitarray.count()
bf = BloomFilter(1000, error=0.01)
assert 7 == bf._num_hashes
assert 9583 == bf._num_bits
assert 9583 == len(bf._bitarray)
assert 0 == bf._bitarray.count()
def test_add_contains(self):
bf = BloomFilter(1000, error=0.01)
keys1 = [random_string(10) for _ in range(1000)]
keys2 = [random_string(10) for _ in range(1000)]
for k in keys1:
bf.add(k)
assert k in bf
class TestScalableBloomFilter(object):
def test_scaling(self):
S, N, E = 1000, 10000, 0.01
sbf = ScalableBloomFilter(S, E, 2)
keys1 = {random_string(10) for _ in range(N)}
keys2 = {random_string(10) for _ in range(N)}
for k in keys1:
sbf.add(k)
assert k in sbf
error = 0
total = 0
for k in keys2:
if k in keys1:
continue
total += 1
if k in sbf:
error += 1
error_rate = error / total
assert error_rate <= 2 * 0.01, 'Error rate is %.3f when it should be %.3f' % (
error_rate, E)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def random_string(N):
return ''.join([random.choice(alphabet) for _ in range(N)])
class TestBloomFilter(object):
def test_setup(self):
bf = BloomFilter(1000)
assert 10 == bf._num_hashes
assert 14380 == bf._num_bits
assert 14380 == len(bf._bitarray)
assert 0 == bf._bitarray.count()
bf = BloomFilter(1000, error=0.01)
assert 7 == bf._num_hashes
assert 9583 == bf._num_bits
assert 9583 == len(bf._bitarray)
assert 0 == bf._bitarray.count()
def test_add_contains(self):
bf = BloomFilter(1000, error=0.01)
keys1 = [random_string(10) for _ in range(1000)]
keys2 = [random_string(10) for _ in range(1000)]
for k in keys1:
bf.add(k)
assert k in bf
class TestScalableBloomFilter(object):
def test_scaling(self):
S, N, E = 1000, 10000, 0.01
sbf = ScalableBloomFilter(S, E, 2)
keys1 = {random_string(10) for _ in range(N)}
keys2 = {random_string(10) for _ in range(N)}
for k in keys1:
sbf.add(k)
assert k in sbf
error = 0
total = 0
for k in keys2:
if k in keys1:
continue
total += 1
if k in sbf:
error += 1
error_rate = error / total
assert error_rate <= 2 * 0.01, 'Error rate is %.3f when it should be %.3f' % (
error_rate, E)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
alphabet = string.ascii_letters
def random_string(N):
return ''.join([random.choice(alphabet) for _ in range(N)])
class TestBloomFilter(object):
def test_setup(self):
bf = BloomFilter(1000)
assert 10 == bf._num_hashes
assert 14380 == bf._num_bits
assert 14380 == len(bf._bitarray)
assert 0 == bf._bitarray.count()
bf = BloomFilter(1000, error=0.01)
assert 7 == bf._num_hashes
assert 9583 == bf._num_bits
assert 9583 == len(bf._bitarray)
assert 0 == bf._bitarray.count()
def test_add_contains(self):
bf = BloomFilter(1000, error=0.01)
keys1 = [random_string(10) for _ in range(1000)]
keys2 = [random_string(10) for _ in range(1000)]
for k in keys1:
bf.add(k)
assert k in bf
class TestScalableBloomFilter(object):
def test_scaling(self):
S, N, E = 1000, 10000, 0.01
sbf = ScalableBloomFilter(S, E, 2)
keys1 = {random_string(10) for _ in range(N)}
keys2 = {random_string(10) for _ in range(N)}
for k in keys1:
sbf.add(k)
assert k in sbf
error = 0
total = 0
for k in keys2:
if k in keys1:
continue
total += 1
if k in sbf:
error += 1
error_rate = error / total
assert error_rate <= 2 * 0.01, 'Error rate is %.3f when it should be %.3f' % (
error_rate, E)
<|reserved_special_token_1|>
from pyloom import *
import random
import string
alphabet = string.ascii_letters
def random_string(N):
return ''.join([random.choice(alphabet) for _ in range(N)])
class TestBloomFilter(object):
def test_setup(self):
bf = BloomFilter(1000)
assert 10 == bf._num_hashes
assert 14380 == bf._num_bits
assert 14380 == len(bf._bitarray)
assert 0 == bf._bitarray.count()
bf = BloomFilter(1000, error=0.01)
assert 7 == bf._num_hashes
assert 9583 == bf._num_bits
assert 9583 == len(bf._bitarray)
assert 0 == bf._bitarray.count()
def test_add_contains(self):
bf = BloomFilter(1000, error=0.01)
keys1 = [random_string(10) for _ in range(1000)]
keys2 = [random_string(10) for _ in range(1000)]
for k in keys1:
bf.add(k)
assert k in bf
class TestScalableBloomFilter(object):
def test_scaling(self):
S, N, E = 1000, 10000, 0.01
sbf = ScalableBloomFilter(S, E, 2)
keys1 = {random_string(10) for _ in range(N)}
keys2 = {random_string(10) for _ in range(N)}
for k in keys1:
sbf.add(k)
assert k in sbf
error = 0
total = 0
for k in keys2:
if k in keys1:
continue
total += 1
if k in sbf:
error += 1
error_rate = error / total
assert error_rate <= 2 * 0.01, 'Error rate is %.3f when it should be %.3f' % (
error_rate, E)
<|reserved_special_token_1|>
from pyloom import *
import random
import string
alphabet = string.ascii_letters
def random_string(N):
return ''.join([random.choice(alphabet) for _ in range(N)])
class TestBloomFilter(object):
def test_setup(self):
bf = BloomFilter(1000)
assert 10 == bf._num_hashes
assert 14380 == bf._num_bits
assert 14380 == len(bf._bitarray)
# and initially all bits are False
assert 0 == bf._bitarray.count()
# test again with a different false positive rate
bf = BloomFilter(1000, error=0.01)
assert 7 == bf._num_hashes
assert 9583 == bf._num_bits
assert 9583 == len(bf._bitarray)
# and initially all bits are False
assert 0 == bf._bitarray.count()
def test_add_contains(self):
bf = BloomFilter(1000, error=0.01)
keys1 = [random_string(10) for _ in range(1000)]
keys2 = [random_string(10) for _ in range(1000)]
for k in keys1:
bf.add(k)
assert k in bf
class TestScalableBloomFilter(object):
def test_scaling(self):
S, N, E = 1000, 10000, 0.01
# create a bloom filter with initial capacity of S
sbf = ScalableBloomFilter(S, E, 2)
keys1 = {random_string(10) for _ in range(N)}
keys2 = {random_string(10) for _ in range(N)}
for k in keys1:
sbf.add(k)
assert k in sbf
error = 0
total = 0
for k in keys2:
if k in keys1:
continue
total += 1
if k in sbf:
error += 1
error_rate = error / total
assert error_rate <= 2 * 0.01, 'Error rate is %.3f when it should be %.3f' % (error_rate, E)
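The constants asserted in test_setup (14380 bits / 10 hashes for the default error rate, 9583 bits / 7 hashes for error=0.01) line up with the textbook Bloom filter sizing formulas. The sketch below assumes pyloom uses those standard formulas and that the default error rate is roughly 0.1%; neither assumption is stated in the tests, and exact rounding is up to the library.

# Sketch of the standard sizing math: m = -n*ln(p) / (ln 2)^2 bits,
# k = (m/n) * ln 2 hash functions. The 0.001 default below is an assumption.
import math

def size_estimate(n, p):
    m = -n * math.log(p) / (math.log(2) ** 2)  # bits in the filter
    k = (m / n) * math.log(2)                  # number of hash functions
    return math.ceil(m), round(k)

print(size_estimate(1000, 0.001))  # ~ (14378, 10), close to the asserted 14380 / 10
print(size_estimate(1000, 0.01))   # ~ (9586, 7), close to the asserted 9583 / 7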
|
flexible
|
{
"blob_id": "24e486edc6f80e0b7d58b5df898e6d34f53111c8",
"index": 4389,
"step-1": "<mask token>\n\n\nclass TestBloomFilter(object):\n\n def test_setup(self):\n bf = BloomFilter(1000)\n assert 10 == bf._num_hashes\n assert 14380 == bf._num_bits\n assert 14380 == len(bf._bitarray)\n assert 0 == bf._bitarray.count()\n bf = BloomFilter(1000, error=0.01)\n assert 7 == bf._num_hashes\n assert 9583 == bf._num_bits\n assert 9583 == len(bf._bitarray)\n assert 0 == bf._bitarray.count()\n\n def test_add_contains(self):\n bf = BloomFilter(1000, error=0.01)\n keys1 = [random_string(10) for _ in range(1000)]\n keys2 = [random_string(10) for _ in range(1000)]\n for k in keys1:\n bf.add(k)\n assert k in bf\n\n\nclass TestScalableBloomFilter(object):\n\n def test_scaling(self):\n S, N, E = 1000, 10000, 0.01\n sbf = ScalableBloomFilter(S, E, 2)\n keys1 = {random_string(10) for _ in range(N)}\n keys2 = {random_string(10) for _ in range(N)}\n for k in keys1:\n sbf.add(k)\n assert k in sbf\n error = 0\n total = 0\n for k in keys2:\n if k in keys1:\n continue\n total += 1\n if k in sbf:\n error += 1\n error_rate = error / total\n assert error_rate <= 2 * 0.01, 'Error rate is %.3f when it should be %.3f' % (\n error_rate, E)\n",
"step-2": "<mask token>\n\n\ndef random_string(N):\n return ''.join([random.choice(alphabet) for _ in range(N)])\n\n\nclass TestBloomFilter(object):\n\n def test_setup(self):\n bf = BloomFilter(1000)\n assert 10 == bf._num_hashes\n assert 14380 == bf._num_bits\n assert 14380 == len(bf._bitarray)\n assert 0 == bf._bitarray.count()\n bf = BloomFilter(1000, error=0.01)\n assert 7 == bf._num_hashes\n assert 9583 == bf._num_bits\n assert 9583 == len(bf._bitarray)\n assert 0 == bf._bitarray.count()\n\n def test_add_contains(self):\n bf = BloomFilter(1000, error=0.01)\n keys1 = [random_string(10) for _ in range(1000)]\n keys2 = [random_string(10) for _ in range(1000)]\n for k in keys1:\n bf.add(k)\n assert k in bf\n\n\nclass TestScalableBloomFilter(object):\n\n def test_scaling(self):\n S, N, E = 1000, 10000, 0.01\n sbf = ScalableBloomFilter(S, E, 2)\n keys1 = {random_string(10) for _ in range(N)}\n keys2 = {random_string(10) for _ in range(N)}\n for k in keys1:\n sbf.add(k)\n assert k in sbf\n error = 0\n total = 0\n for k in keys2:\n if k in keys1:\n continue\n total += 1\n if k in sbf:\n error += 1\n error_rate = error / total\n assert error_rate <= 2 * 0.01, 'Error rate is %.3f when it should be %.3f' % (\n error_rate, E)\n",
"step-3": "<mask token>\nalphabet = string.ascii_letters\n\n\ndef random_string(N):\n return ''.join([random.choice(alphabet) for _ in range(N)])\n\n\nclass TestBloomFilter(object):\n\n def test_setup(self):\n bf = BloomFilter(1000)\n assert 10 == bf._num_hashes\n assert 14380 == bf._num_bits\n assert 14380 == len(bf._bitarray)\n assert 0 == bf._bitarray.count()\n bf = BloomFilter(1000, error=0.01)\n assert 7 == bf._num_hashes\n assert 9583 == bf._num_bits\n assert 9583 == len(bf._bitarray)\n assert 0 == bf._bitarray.count()\n\n def test_add_contains(self):\n bf = BloomFilter(1000, error=0.01)\n keys1 = [random_string(10) for _ in range(1000)]\n keys2 = [random_string(10) for _ in range(1000)]\n for k in keys1:\n bf.add(k)\n assert k in bf\n\n\nclass TestScalableBloomFilter(object):\n\n def test_scaling(self):\n S, N, E = 1000, 10000, 0.01\n sbf = ScalableBloomFilter(S, E, 2)\n keys1 = {random_string(10) for _ in range(N)}\n keys2 = {random_string(10) for _ in range(N)}\n for k in keys1:\n sbf.add(k)\n assert k in sbf\n error = 0\n total = 0\n for k in keys2:\n if k in keys1:\n continue\n total += 1\n if k in sbf:\n error += 1\n error_rate = error / total\n assert error_rate <= 2 * 0.01, 'Error rate is %.3f when it should be %.3f' % (\n error_rate, E)\n",
"step-4": "from pyloom import *\nimport random\nimport string\nalphabet = string.ascii_letters\n\n\ndef random_string(N):\n return ''.join([random.choice(alphabet) for _ in range(N)])\n\n\nclass TestBloomFilter(object):\n\n def test_setup(self):\n bf = BloomFilter(1000)\n assert 10 == bf._num_hashes\n assert 14380 == bf._num_bits\n assert 14380 == len(bf._bitarray)\n assert 0 == bf._bitarray.count()\n bf = BloomFilter(1000, error=0.01)\n assert 7 == bf._num_hashes\n assert 9583 == bf._num_bits\n assert 9583 == len(bf._bitarray)\n assert 0 == bf._bitarray.count()\n\n def test_add_contains(self):\n bf = BloomFilter(1000, error=0.01)\n keys1 = [random_string(10) for _ in range(1000)]\n keys2 = [random_string(10) for _ in range(1000)]\n for k in keys1:\n bf.add(k)\n assert k in bf\n\n\nclass TestScalableBloomFilter(object):\n\n def test_scaling(self):\n S, N, E = 1000, 10000, 0.01\n sbf = ScalableBloomFilter(S, E, 2)\n keys1 = {random_string(10) for _ in range(N)}\n keys2 = {random_string(10) for _ in range(N)}\n for k in keys1:\n sbf.add(k)\n assert k in sbf\n error = 0\n total = 0\n for k in keys2:\n if k in keys1:\n continue\n total += 1\n if k in sbf:\n error += 1\n error_rate = error / total\n assert error_rate <= 2 * 0.01, 'Error rate is %.3f when it should be %.3f' % (\n error_rate, E)\n",
"step-5": "from pyloom import *\n\nimport random\nimport string\n\nalphabet = string.ascii_letters\n\n\ndef random_string(N):\n return ''.join([random.choice(alphabet) for _ in range(N)])\n\n\nclass TestBloomFilter(object):\n def test_setup(self):\n bf = BloomFilter(1000)\n assert 10 == bf._num_hashes\n assert 14380 == bf._num_bits\n assert 14380 == len(bf._bitarray)\n\n # and initially all bits are False\n assert 0 == bf._bitarray.count()\n\n # test again with a different false positive rate\n bf = BloomFilter(1000, error=0.01)\n assert 7 == bf._num_hashes\n assert 9583 == bf._num_bits\n assert 9583 == len(bf._bitarray)\n\n # and initially all bits are False\n assert 0 == bf._bitarray.count()\n\n def test_add_contains(self):\n bf = BloomFilter(1000, error=0.01)\n keys1 = [random_string(10) for _ in range(1000)]\n keys2 = [random_string(10) for _ in range(1000)]\n\n for k in keys1:\n bf.add(k)\n assert k in bf\nclass TestScalableBloomFilter(object):\n def test_scaling(self):\n S, N, E = 1000, 10000, 0.01\n\n # create a bloom filter with initial capacity of S\n sbf = ScalableBloomFilter(S, E, 2)\n keys1 = {random_string(10) for _ in range(N)}\n keys2 = {random_string(10) for _ in range(N)}\n\n for k in keys1:\n sbf.add(k)\n assert k in sbf\n\n error = 0\n total = 0\n for k in keys2:\n if k in keys1:\n continue\n\n total += 1\n if k in sbf:\n error += 1\n\n error_rate = error / total\n assert error_rate <= 2 * 0.01, 'Error rate is %.3f when it should be %.3f' % (error_rate, E)\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
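The test classes in this record use plain asserts with no unittest.TestCase base, i.e. the pytest style, so they can be collected and run directly; the file name below is only a placeholder for wherever the module is saved.

# Placeholder file name; pytest discovers TestBloomFilter / TestScalableBloomFilter by naming convention.
# python -m pytest test_pyloom.py -v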
<|reserved_special_token_0|>
class PrinterTkinter:
def __init__(self):
self.root = Tk()
self.root.title('气球发放')
self.runid_to_node = dict()
self.runid_to_uid = dict()
self.runid_to_pid = dict()
self.have_uid_pid = set()
self.unfinished_runid = []
self.frame_left_top = Frame(width=400, height=200)
self.frame_right_top = Frame(width=400, height=200)
self.frame_center = Frame(width=800, height=400)
self.frame_bottom = Frame(width=800, height=50)
self.left_top_title = Label(self.frame_left_top, text='发放状态:', font
=('Arial', 25))
self.left_top_title.grid(row=0, column=0, columnspan=2, sticky=NSEW,
padx=50, pady=30)
self.var_finish = StringVar()
self.var_wait = StringVar()
self.left_top_frame = Frame(self.frame_left_top)
self.left_top_frame_left1 = Label(self.frame_left_top, text='已发放',
font=('Arial', 20))
self.left_top_frame_left2 = Label(self.frame_left_top, textvariable
=self.var_finish, font=('Arial', 15))
self.var_finish.set(0)
self.left_top_frame_right1 = Label(self.frame_left_top, text='未发放',
font=('Arial', 20))
self.left_top_frame_right2 = Label(self.frame_left_top,
textvariable=self.var_wait, font=('Arial', 15))
self.var_wait.set(0)
self.left_top_frame_left1.grid(row=1, column=0)
self.left_top_frame_left2.grid(row=1, column=1)
self.left_top_frame_right1.grid(row=2, column=0)
self.left_top_frame_right2.grid(row=2, column=1)
self.var_entry = StringVar()
self.right_top_title = Label(self.frame_right_top, text=
'切换状态(输入runid):', font=('Arial', 20))
self.right_top_entry = Entry(self.frame_right_top, textvariable=
self.var_entry)
self.number = int
self.right_top_button = Button(self.frame_right_top, text='确定',
command=self.button_switch, font=('Arial', 15))
self.right_top_title.grid(row=0, column=0)
self.right_top_entry.grid(row=1, column=0)
self.right_top_button.grid(row=2, column=0, padx=20, pady=20)
self.tree = ttk.Treeview(self.frame_center, show='headings', height
=18, columns=('a', 'b', 'c', 'd', 'e'))
self.vbar = ttk.Scrollbar(self.frame_center, orient=VERTICAL,
command=self.tree.yview)
self.tree.configure(yscrollcommand=self.vbar.set)
self.tree.column('a', width=50, anchor='center')
self.tree.column('b', width=150, anchor='center')
self.tree.column('c', width=150, anchor='center')
self.tree.column('d', width=200, anchor='center')
self.tree.column('e', width=150, anchor='center')
self.tree.heading('a', text='Runid')
self.tree.heading('b', text='User')
self.tree.heading('c', text='Problem')
self.tree.heading('d', text='Time')
self.tree.heading('e', text='Status')
self.get_tree()
self.tree.grid(row=0, column=0, sticky=NSEW)
self.vbar.grid(row=0, column=1, sticky=NS)
self.frame_left_top.grid(row=0, column=0, padx=2, pady=5)
self.frame_right_top.grid(row=0, column=1, padx=30, pady=30)
self.frame_center.grid(row=1, column=0, columnspan=2, padx=4, pady=5)
self.frame_bottom.grid(row=2, column=0, columnspan=2)
self.frame_left_top.grid_propagate(0)
self.frame_right_top.grid_propagate(0)
self.frame_center.grid_propagate(0)
self.frame_bottom.grid_propagate(0)
thread.start_new_thread(self.listen, ())
self.root.mainloop()
def get_tree(self):
bak_list = R.lrange(BACKUP_QUEUE_NAME, 0, -1)
for bak in bak_list:
bak = bak.split('_')
uid = int(bak[0])
pid = int(bak[1])
runid = int(bak[2])
self.runid_to_uid[runid] = uid
self.runid_to_pid[runid] = pid
if R.hget(get_status_key(uid, pid), RUNID_FIELD) == None:
R.hset(get_status_key(uid, pid), RUNID_FIELD, runid)
status = STATUS_WAIT
R.hset(get_status_key(uid, pid), STATUS_FIELD, status)
submit_time = time.ctime()
R.hset(get_status_key(uid, pid), SUBMIT_TIME_FIELD, submit_time
)
self.have_uid_pid.add('%d_%d' % (uid, pid))
elif '%d_%d' % (uid, pid) in self.have_uid_pid:
continue
else:
status = R.hget(get_status_key(uid, pid), STATUS_FIELD)
submit_time = R.hget(get_status_key(uid, pid),
SUBMIT_TIME_FIELD)
self.have_uid_pid.add('%d_%d' % (uid, pid))
if status == STATUS_FINISHED:
self.var_finish.set(int(self.var_finish.get()) + 1)
pos = 'end'
else:
self.var_wait.set(int(self.var_wait.get()) + 1)
pos = lower_bound(self.unfinished_runid, runid)
self.unfinished_runid.insert(pos, runid)
node = self.tree.insert('', str(pos), values=(runid, get_name(
uid), get_problem_color(pid), submit_time, status))
self.runid_to_node[runid] = node
def button_switch(self):
self.number = self.right_top_entry.get()
runid = int(self.right_top_entry.get())
if not runid in self.runid_to_node:
return
self.tree.delete(self.runid_to_node[runid])
uid = self.runid_to_uid[runid]
pid = self.runid_to_pid[runid]
status_before = R.hget(get_status_key(uid, pid), STATUS_FIELD)
submit_time = R.hget(get_status_key(uid, pid), SUBMIT_TIME_FIELD)
if status_before == STATUS_WAIT:
status = STATUS_FINISHED
R.hset(get_status_key(uid, pid), STATUS_FIELD, STATUS_FINISHED)
else:
status = STATUS_WAIT
R.hset(get_status_key(uid, pid), STATUS_FIELD, STATUS_WAIT)
if status == STATUS_FINISHED:
pos = lower_bound(self.unfinished_runid, runid)
self.unfinished_runid.pop(pos)
pos = 'end'
else:
pos = lower_bound(self.unfinished_runid, runid)
self.unfinished_runid.insert(pos, runid)
node = self.tree.insert('', str(pos), values=(runid, get_name(uid),
get_problem_color(pid), submit_time, status))
if status == STATUS_WAIT:
self.var_wait.set(int(self.var_wait.get()) + 1)
self.var_finish.set(int(self.var_finish.get()) - 1)
else:
self.var_wait.set(int(self.var_wait.get()) - 1)
self.var_finish.set(int(self.var_finish.get()) + 1)
R.hset(get_status_key(uid, pid), STATUS_FIELD, status)
self.runid_to_node[runid] = node
def listen(self):
while True:
msg = R.blpop(QUEUE_NAME, 0)[1]
R.rpush(BACKUP_QUEUE_NAME, msg)
bak = msg.split('_')
uid = int(bak[0])
pid = int(bak[1])
runid = int(bak[2])
self.runid_to_uid[runid] = uid
self.runid_to_pid[runid] = pid
if R.hget(get_status_key(uid, pid), RUNID_FIELD) == None:
R.hset(get_status_key(uid, pid), RUNID_FIELD, runid)
status = STATUS_WAIT
R.hset(get_status_key(uid, pid), STATUS_FIELD, status)
submit_time = time.ctime()
R.hset(get_status_key(uid, pid), SUBMIT_TIME_FIELD, submit_time
)
self.have_uid_pid.add('%d_%d' % (uid, pid))
elif '%d_%d' % (uid, pid) in self.have_uid_pid:
continue
else:
status = R.hget(get_status_key(uid, pid), STATUS_FIELD)
submit_time = R.hget(get_status_key(uid, pid),
SUBMIT_TIME_FIELD)
self.have_uid_pid.add('%d_%d' % (uid, pid))
if status == STATUS_FINISHED:
self.var_finish.set(int(self.var_finish.get()) + 1)
pos = 'end'
else:
self.var_wait.set(int(self.var_wait.get()) + 1)
pos = lower_bound(self.unfinished_runid, runid)
self.unfinished_runid.insert(pos, runid)
node = self.tree.insert('', str(pos), values=(runid, get_name(
uid), get_problem_color(pid), submit_time, status))
self.runid_to_node[runid] = node
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def lower_bound(arr, key):
left = 0
right = len(arr) - 1
res = len(arr)
while left <= right:
mid = left + right >> 1
if arr[mid] >= key:
res = mid
right = mid - 1
else:
left = mid + 1
return res
def get_status_key(user_id, pid):
return 'status_%d_%d' % (user_id, pid)
def get_name(user_id):
user_id = str(user_id)
if user_id in NAME:
return NAME[user_id]
else:
return 'user: %s' % user_id
<|reserved_special_token_0|>
class PrinterTkinter:
def __init__(self):
self.root = Tk()
self.root.title('气球发放')
self.runid_to_node = dict()
self.runid_to_uid = dict()
self.runid_to_pid = dict()
self.have_uid_pid = set()
self.unfinished_runid = []
self.frame_left_top = Frame(width=400, height=200)
self.frame_right_top = Frame(width=400, height=200)
self.frame_center = Frame(width=800, height=400)
self.frame_bottom = Frame(width=800, height=50)
self.left_top_title = Label(self.frame_left_top, text='发放状态:', font
=('Arial', 25))
self.left_top_title.grid(row=0, column=0, columnspan=2, sticky=NSEW,
padx=50, pady=30)
self.var_finish = StringVar()
self.var_wait = StringVar()
self.left_top_frame = Frame(self.frame_left_top)
self.left_top_frame_left1 = Label(self.frame_left_top, text='已发放',
font=('Arial', 20))
self.left_top_frame_left2 = Label(self.frame_left_top, textvariable
=self.var_finish, font=('Arial', 15))
self.var_finish.set(0)
self.left_top_frame_right1 = Label(self.frame_left_top, text='未发放',
font=('Arial', 20))
self.left_top_frame_right2 = Label(self.frame_left_top,
textvariable=self.var_wait, font=('Arial', 15))
self.var_wait.set(0)
self.left_top_frame_left1.grid(row=1, column=0)
self.left_top_frame_left2.grid(row=1, column=1)
self.left_top_frame_right1.grid(row=2, column=0)
self.left_top_frame_right2.grid(row=2, column=1)
self.var_entry = StringVar()
self.right_top_title = Label(self.frame_right_top, text=
'切换状态(输入runid):', font=('Arial', 20))
self.right_top_entry = Entry(self.frame_right_top, textvariable=
self.var_entry)
self.number = int
self.right_top_button = Button(self.frame_right_top, text='确定',
command=self.button_switch, font=('Arial', 15))
self.right_top_title.grid(row=0, column=0)
self.right_top_entry.grid(row=1, column=0)
self.right_top_button.grid(row=2, column=0, padx=20, pady=20)
self.tree = ttk.Treeview(self.frame_center, show='headings', height
=18, columns=('a', 'b', 'c', 'd', 'e'))
self.vbar = ttk.Scrollbar(self.frame_center, orient=VERTICAL,
command=self.tree.yview)
self.tree.configure(yscrollcommand=self.vbar.set)
self.tree.column('a', width=50, anchor='center')
self.tree.column('b', width=150, anchor='center')
self.tree.column('c', width=150, anchor='center')
self.tree.column('d', width=200, anchor='center')
self.tree.column('e', width=150, anchor='center')
self.tree.heading('a', text='Runid')
self.tree.heading('b', text='User')
self.tree.heading('c', text='Problem')
self.tree.heading('d', text='Time')
self.tree.heading('e', text='Status')
self.get_tree()
self.tree.grid(row=0, column=0, sticky=NSEW)
self.vbar.grid(row=0, column=1, sticky=NS)
self.frame_left_top.grid(row=0, column=0, padx=2, pady=5)
self.frame_right_top.grid(row=0, column=1, padx=30, pady=30)
self.frame_center.grid(row=1, column=0, columnspan=2, padx=4, pady=5)
self.frame_bottom.grid(row=2, column=0, columnspan=2)
self.frame_left_top.grid_propagate(0)
self.frame_right_top.grid_propagate(0)
self.frame_center.grid_propagate(0)
self.frame_bottom.grid_propagate(0)
thread.start_new_thread(self.listen, ())
self.root.mainloop()
def get_tree(self):
bak_list = R.lrange(BACKUP_QUEUE_NAME, 0, -1)
for bak in bak_list:
bak = bak.split('_')
uid = int(bak[0])
pid = int(bak[1])
runid = int(bak[2])
self.runid_to_uid[runid] = uid
self.runid_to_pid[runid] = pid
if R.hget(get_status_key(uid, pid), RUNID_FIELD) == None:
R.hset(get_status_key(uid, pid), RUNID_FIELD, runid)
status = STATUS_WAIT
R.hset(get_status_key(uid, pid), STATUS_FIELD, status)
submit_time = time.ctime()
R.hset(get_status_key(uid, pid), SUBMIT_TIME_FIELD, submit_time
)
self.have_uid_pid.add('%d_%d' % (uid, pid))
elif '%d_%d' % (uid, pid) in self.have_uid_pid:
continue
else:
status = R.hget(get_status_key(uid, pid), STATUS_FIELD)
submit_time = R.hget(get_status_key(uid, pid),
SUBMIT_TIME_FIELD)
self.have_uid_pid.add('%d_%d' % (uid, pid))
if status == STATUS_FINISHED:
self.var_finish.set(int(self.var_finish.get()) + 1)
pos = 'end'
else:
self.var_wait.set(int(self.var_wait.get()) + 1)
pos = lower_bound(self.unfinished_runid, runid)
self.unfinished_runid.insert(pos, runid)
node = self.tree.insert('', str(pos), values=(runid, get_name(
uid), get_problem_color(pid), submit_time, status))
self.runid_to_node[runid] = node
def button_switch(self):
self.number = self.right_top_entry.get()
runid = int(self.right_top_entry.get())
if not runid in self.runid_to_node:
return
self.tree.delete(self.runid_to_node[runid])
uid = self.runid_to_uid[runid]
pid = self.runid_to_pid[runid]
status_before = R.hget(get_status_key(uid, pid), STATUS_FIELD)
submit_time = R.hget(get_status_key(uid, pid), SUBMIT_TIME_FIELD)
if status_before == STATUS_WAIT:
status = STATUS_FINISHED
R.hset(get_status_key(uid, pid), STATUS_FIELD, STATUS_FINISHED)
else:
status = STATUS_WAIT
R.hset(get_status_key(uid, pid), STATUS_FIELD, STATUS_WAIT)
if status == STATUS_FINISHED:
pos = lower_bound(self.unfinished_runid, runid)
self.unfinished_runid.pop(pos)
pos = 'end'
else:
pos = lower_bound(self.unfinished_runid, runid)
self.unfinished_runid.insert(pos, runid)
node = self.tree.insert('', str(pos), values=(runid, get_name(uid),
get_problem_color(pid), submit_time, status))
if status == STATUS_WAIT:
self.var_wait.set(int(self.var_wait.get()) + 1)
self.var_finish.set(int(self.var_finish.get()) - 1)
else:
self.var_wait.set(int(self.var_wait.get()) - 1)
self.var_finish.set(int(self.var_finish.get()) + 1)
R.hset(get_status_key(uid, pid), STATUS_FIELD, status)
self.runid_to_node[runid] = node
def listen(self):
while True:
msg = R.blpop(QUEUE_NAME, 0)[1]
R.rpush(BACKUP_QUEUE_NAME, msg)
bak = msg.split('_')
uid = int(bak[0])
pid = int(bak[1])
runid = int(bak[2])
self.runid_to_uid[runid] = uid
self.runid_to_pid[runid] = pid
if R.hget(get_status_key(uid, pid), RUNID_FIELD) == None:
R.hset(get_status_key(uid, pid), RUNID_FIELD, runid)
status = STATUS_WAIT
R.hset(get_status_key(uid, pid), STATUS_FIELD, status)
submit_time = time.ctime()
R.hset(get_status_key(uid, pid), SUBMIT_TIME_FIELD, submit_time
)
self.have_uid_pid.add('%d_%d' % (uid, pid))
elif '%d_%d' % (uid, pid) in self.have_uid_pid:
continue
else:
status = R.hget(get_status_key(uid, pid), STATUS_FIELD)
submit_time = R.hget(get_status_key(uid, pid),
SUBMIT_TIME_FIELD)
self.have_uid_pid.add('%d_%d' % (uid, pid))
if status == STATUS_FINISHED:
self.var_finish.set(int(self.var_finish.get()) + 1)
pos = 'end'
else:
self.var_wait.set(int(self.var_wait.get()) + 1)
pos = lower_bound(self.unfinished_runid, runid)
self.unfinished_runid.insert(pos, runid)
node = self.tree.insert('', str(pos), values=(runid, get_name(
uid), get_problem_color(pid), submit_time, status))
self.runid_to_node[runid] = node
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def lower_bound(arr, key):
left = 0
right = len(arr) - 1
res = len(arr)
while left <= right:
mid = left + right >> 1
if arr[mid] >= key:
res = mid
right = mid - 1
else:
left = mid + 1
return res
def get_status_key(user_id, pid):
return 'status_%d_%d' % (user_id, pid)
def get_name(user_id):
user_id = str(user_id)
if user_id in NAME:
return NAME[user_id]
else:
return 'user: %s' % user_id
def get_problem_color(pid):
pid = str(pid)
if pid in PROBLEM_NAME:
return PROBLEM_NAME[pid]
else:
return str(pid)
class PrinterTkinter:
def __init__(self):
self.root = Tk()
self.root.title('气球发放')
self.runid_to_node = dict()
self.runid_to_uid = dict()
self.runid_to_pid = dict()
self.have_uid_pid = set()
self.unfinished_runid = []
self.frame_left_top = Frame(width=400, height=200)
self.frame_right_top = Frame(width=400, height=200)
self.frame_center = Frame(width=800, height=400)
self.frame_bottom = Frame(width=800, height=50)
self.left_top_title = Label(self.frame_left_top, text='发放状态:', font
=('Arial', 25))
self.left_top_title.grid(row=0, column=0, columnspan=2, sticky=NSEW,
padx=50, pady=30)
self.var_finish = StringVar()
self.var_wait = StringVar()
self.left_top_frame = Frame(self.frame_left_top)
self.left_top_frame_left1 = Label(self.frame_left_top, text='已发放',
font=('Arial', 20))
self.left_top_frame_left2 = Label(self.frame_left_top, textvariable
=self.var_finish, font=('Arial', 15))
self.var_finish.set(0)
self.left_top_frame_right1 = Label(self.frame_left_top, text='未发放',
font=('Arial', 20))
self.left_top_frame_right2 = Label(self.frame_left_top,
textvariable=self.var_wait, font=('Arial', 15))
self.var_wait.set(0)
self.left_top_frame_left1.grid(row=1, column=0)
self.left_top_frame_left2.grid(row=1, column=1)
self.left_top_frame_right1.grid(row=2, column=0)
self.left_top_frame_right2.grid(row=2, column=1)
self.var_entry = StringVar()
self.right_top_title = Label(self.frame_right_top, text=
'切换状态(输入runid):', font=('Arial', 20))
self.right_top_entry = Entry(self.frame_right_top, textvariable=
self.var_entry)
self.number = int
self.right_top_button = Button(self.frame_right_top, text='确定',
command=self.button_switch, font=('Arial', 15))
self.right_top_title.grid(row=0, column=0)
self.right_top_entry.grid(row=1, column=0)
self.right_top_button.grid(row=2, column=0, padx=20, pady=20)
self.tree = ttk.Treeview(self.frame_center, show='headings', height
=18, columns=('a', 'b', 'c', 'd', 'e'))
self.vbar = ttk.Scrollbar(self.frame_center, orient=VERTICAL,
command=self.tree.yview)
self.tree.configure(yscrollcommand=self.vbar.set)
self.tree.column('a', width=50, anchor='center')
self.tree.column('b', width=150, anchor='center')
self.tree.column('c', width=150, anchor='center')
self.tree.column('d', width=200, anchor='center')
self.tree.column('e', width=150, anchor='center')
self.tree.heading('a', text='Runid')
self.tree.heading('b', text='User')
self.tree.heading('c', text='Problem')
self.tree.heading('d', text='Time')
self.tree.heading('e', text='Status')
self.get_tree()
self.tree.grid(row=0, column=0, sticky=NSEW)
self.vbar.grid(row=0, column=1, sticky=NS)
self.frame_left_top.grid(row=0, column=0, padx=2, pady=5)
self.frame_right_top.grid(row=0, column=1, padx=30, pady=30)
self.frame_center.grid(row=1, column=0, columnspan=2, padx=4, pady=5)
self.frame_bottom.grid(row=2, column=0, columnspan=2)
self.frame_left_top.grid_propagate(0)
self.frame_right_top.grid_propagate(0)
self.frame_center.grid_propagate(0)
self.frame_bottom.grid_propagate(0)
thread.start_new_thread(self.listen, ())
self.root.mainloop()
def get_tree(self):
bak_list = R.lrange(BACKUP_QUEUE_NAME, 0, -1)
for bak in bak_list:
bak = bak.split('_')
uid = int(bak[0])
pid = int(bak[1])
runid = int(bak[2])
self.runid_to_uid[runid] = uid
self.runid_to_pid[runid] = pid
if R.hget(get_status_key(uid, pid), RUNID_FIELD) == None:
R.hset(get_status_key(uid, pid), RUNID_FIELD, runid)
status = STATUS_WAIT
R.hset(get_status_key(uid, pid), STATUS_FIELD, status)
submit_time = time.ctime()
R.hset(get_status_key(uid, pid), SUBMIT_TIME_FIELD, submit_time
)
self.have_uid_pid.add('%d_%d' % (uid, pid))
elif '%d_%d' % (uid, pid) in self.have_uid_pid:
continue
else:
status = R.hget(get_status_key(uid, pid), STATUS_FIELD)
submit_time = R.hget(get_status_key(uid, pid),
SUBMIT_TIME_FIELD)
self.have_uid_pid.add('%d_%d' % (uid, pid))
if status == STATUS_FINISHED:
self.var_finish.set(int(self.var_finish.get()) + 1)
pos = 'end'
else:
self.var_wait.set(int(self.var_wait.get()) + 1)
pos = lower_bound(self.unfinished_runid, runid)
self.unfinished_runid.insert(pos, runid)
node = self.tree.insert('', str(pos), values=(runid, get_name(
uid), get_problem_color(pid), submit_time, status))
self.runid_to_node[runid] = node
def button_switch(self):
self.number = self.right_top_entry.get()
runid = int(self.right_top_entry.get())
if not runid in self.runid_to_node:
return
self.tree.delete(self.runid_to_node[runid])
uid = self.runid_to_uid[runid]
pid = self.runid_to_pid[runid]
status_before = R.hget(get_status_key(uid, pid), STATUS_FIELD)
submit_time = R.hget(get_status_key(uid, pid), SUBMIT_TIME_FIELD)
if status_before == STATUS_WAIT:
status = STATUS_FINISHED
R.hset(get_status_key(uid, pid), STATUS_FIELD, STATUS_FINISHED)
else:
status = STATUS_WAIT
R.hset(get_status_key(uid, pid), STATUS_FIELD, STATUS_WAIT)
if status == STATUS_FINISHED:
pos = lower_bound(self.unfinished_runid, runid)
self.unfinished_runid.pop(pos)
pos = 'end'
else:
pos = lower_bound(self.unfinished_runid, runid)
self.unfinished_runid.insert(pos, runid)
node = self.tree.insert('', str(pos), values=(runid, get_name(uid),
get_problem_color(pid), submit_time, status))
if status == STATUS_WAIT:
self.var_wait.set(int(self.var_wait.get()) + 1)
self.var_finish.set(int(self.var_finish.get()) - 1)
else:
self.var_wait.set(int(self.var_wait.get()) - 1)
self.var_finish.set(int(self.var_finish.get()) + 1)
R.hset(get_status_key(uid, pid), STATUS_FIELD, status)
self.runid_to_node[runid] = node
def listen(self):
while True:
msg = R.blpop(QUEUE_NAME, 0)[1]
R.rpush(BACKUP_QUEUE_NAME, msg)
bak = msg.split('_')
uid = int(bak[0])
pid = int(bak[1])
runid = int(bak[2])
self.runid_to_uid[runid] = uid
self.runid_to_pid[runid] = pid
if R.hget(get_status_key(uid, pid), RUNID_FIELD) == None:
R.hset(get_status_key(uid, pid), RUNID_FIELD, runid)
status = STATUS_WAIT
R.hset(get_status_key(uid, pid), STATUS_FIELD, status)
submit_time = time.ctime()
R.hset(get_status_key(uid, pid), SUBMIT_TIME_FIELD, submit_time
)
self.have_uid_pid.add('%d_%d' % (uid, pid))
elif '%d_%d' % (uid, pid) in self.have_uid_pid:
continue
else:
status = R.hget(get_status_key(uid, pid), STATUS_FIELD)
submit_time = R.hget(get_status_key(uid, pid),
SUBMIT_TIME_FIELD)
self.have_uid_pid.add('%d_%d' % (uid, pid))
if status == STATUS_FINISHED:
self.var_finish.set(int(self.var_finish.get()) + 1)
pos = 'end'
else:
self.var_wait.set(int(self.var_wait.get()) + 1)
pos = lower_bound(self.unfinished_runid, runid)
self.unfinished_runid.insert(pos, runid)
node = self.tree.insert('', str(pos), values=(runid, get_name(
uid), get_problem_color(pid), submit_time, status))
self.runid_to_node[runid] = node
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='NENU-OJ Ballon')
parser.add_argument('--namefile', dest='namefile', required=True, type=
str, help='such as namefile.json')
parser.add_argument('--problemfile', dest='problemfile', required=True,
type=str, help='such as problemfile.json')
parser.add_argument('--redishost', dest='redishost', required=True,
type=str, help='such as 127.0.0.1')
parser.add_argument('--redisport', dest='redisport', required=True,
type=int, help='such as 6379')
parser.add_argument('--contestid', dest='contestid', required=True,
type=int, help='such as 9')
args = parser.parse_args()
R = redis.Redis(host=args.redishost, port=args.redisport)
CONTEST_ID = args.contestid
with open(args.namefile) as f:
NAME = json.loads(f.read())
with open(args.problemfile) as f:
PROBLEM_NAME = json.loads(f.read())
QUEUE_NAME = 'ballon_%d' % CONTEST_ID
BACKUP_QUEUE_NAME = 'ballon_bak_%d' % CONTEST_ID
PrinterTkinter()
<|reserved_special_token_1|>
import argparse
import redis
from Tkinter import *
import ttk
import json
import time
import thread
R = None
NAME = {}
PROBLEM_NAME = {}
CONTEST_ID = None
QUEUE_NAME = None
BACKUP_QUEUE_NAME = None
RUNID_FIELD = 'runid'
SUBMIT_TIME_FIELD = 'submit_time'
STATUS_FIELD = 'status'
STATUS_FINISHED = 'finished'
STATUS_WAIT = 'wait'
def lower_bound(arr, key):
left = 0
right = len(arr) - 1
res = len(arr)
while left <= right:
mid = left + right >> 1
if arr[mid] >= key:
res = mid
right = mid - 1
else:
left = mid + 1
return res
def get_status_key(user_id, pid):
return 'status_%d_%d' % (user_id, pid)
def get_name(user_id):
user_id = str(user_id)
if user_id in NAME:
return NAME[user_id]
else:
return 'user: %s' % user_id
def get_problem_color(pid):
pid = str(pid)
if pid in PROBLEM_NAME:
return PROBLEM_NAME[pid]
else:
return str(pid)
class PrinterTkinter:
def __init__(self):
self.root = Tk()
self.root.title('气球发放')
self.runid_to_node = dict()
self.runid_to_uid = dict()
self.runid_to_pid = dict()
self.have_uid_pid = set()
self.unfinished_runid = []
self.frame_left_top = Frame(width=400, height=200)
self.frame_right_top = Frame(width=400, height=200)
self.frame_center = Frame(width=800, height=400)
self.frame_bottom = Frame(width=800, height=50)
self.left_top_title = Label(self.frame_left_top, text='发放状态:', font
=('Arial', 25))
self.left_top_title.grid(row=0, column=0, columnspan=2, sticky=NSEW,
padx=50, pady=30)
self.var_finish = StringVar()
self.var_wait = StringVar()
self.left_top_frame = Frame(self.frame_left_top)
self.left_top_frame_left1 = Label(self.frame_left_top, text='已发放',
font=('Arial', 20))
self.left_top_frame_left2 = Label(self.frame_left_top, textvariable
=self.var_finish, font=('Arial', 15))
self.var_finish.set(0)
self.left_top_frame_right1 = Label(self.frame_left_top, text='未发放',
font=('Arial', 20))
self.left_top_frame_right2 = Label(self.frame_left_top,
textvariable=self.var_wait, font=('Arial', 15))
self.var_wait.set(0)
self.left_top_frame_left1.grid(row=1, column=0)
self.left_top_frame_left2.grid(row=1, column=1)
self.left_top_frame_right1.grid(row=2, column=0)
self.left_top_frame_right2.grid(row=2, column=1)
self.var_entry = StringVar()
self.right_top_title = Label(self.frame_right_top, text=
'切换状态(输入runid):', font=('Arial', 20))
self.right_top_entry = Entry(self.frame_right_top, textvariable=
self.var_entry)
self.number = int
self.right_top_button = Button(self.frame_right_top, text='确定',
command=self.button_switch, font=('Arial', 15))
self.right_top_title.grid(row=0, column=0)
self.right_top_entry.grid(row=1, column=0)
self.right_top_button.grid(row=2, column=0, padx=20, pady=20)
self.tree = ttk.Treeview(self.frame_center, show='headings', height
=18, columns=('a', 'b', 'c', 'd', 'e'))
self.vbar = ttk.Scrollbar(self.frame_center, orient=VERTICAL,
command=self.tree.yview)
self.tree.configure(yscrollcommand=self.vbar.set)
self.tree.column('a', width=50, anchor='center')
self.tree.column('b', width=150, anchor='center')
self.tree.column('c', width=150, anchor='center')
self.tree.column('d', width=200, anchor='center')
self.tree.column('e', width=150, anchor='center')
self.tree.heading('a', text='Runid')
self.tree.heading('b', text='User')
self.tree.heading('c', text='Problem')
self.tree.heading('d', text='Time')
self.tree.heading('e', text='Status')
self.get_tree()
self.tree.grid(row=0, column=0, sticky=NSEW)
self.vbar.grid(row=0, column=1, sticky=NS)
self.frame_left_top.grid(row=0, column=0, padx=2, pady=5)
self.frame_right_top.grid(row=0, column=1, padx=30, pady=30)
self.frame_center.grid(row=1, column=0, columnspan=2, padx=4, pady=5)
self.frame_bottom.grid(row=2, column=0, columnspan=2)
self.frame_left_top.grid_propagate(0)
self.frame_right_top.grid_propagate(0)
self.frame_center.grid_propagate(0)
self.frame_bottom.grid_propagate(0)
thread.start_new_thread(self.listen, ())
self.root.mainloop()
def get_tree(self):
bak_list = R.lrange(BACKUP_QUEUE_NAME, 0, -1)
for bak in bak_list:
bak = bak.split('_')
uid = int(bak[0])
pid = int(bak[1])
runid = int(bak[2])
self.runid_to_uid[runid] = uid
self.runid_to_pid[runid] = pid
if R.hget(get_status_key(uid, pid), RUNID_FIELD) == None:
R.hset(get_status_key(uid, pid), RUNID_FIELD, runid)
status = STATUS_WAIT
R.hset(get_status_key(uid, pid), STATUS_FIELD, status)
submit_time = time.ctime()
R.hset(get_status_key(uid, pid), SUBMIT_TIME_FIELD, submit_time
)
self.have_uid_pid.add('%d_%d' % (uid, pid))
elif '%d_%d' % (uid, pid) in self.have_uid_pid:
continue
else:
status = R.hget(get_status_key(uid, pid), STATUS_FIELD)
submit_time = R.hget(get_status_key(uid, pid),
SUBMIT_TIME_FIELD)
self.have_uid_pid.add('%d_%d' % (uid, pid))
if status == STATUS_FINISHED:
self.var_finish.set(int(self.var_finish.get()) + 1)
pos = 'end'
else:
self.var_wait.set(int(self.var_wait.get()) + 1)
pos = lower_bound(self.unfinished_runid, runid)
self.unfinished_runid.insert(pos, runid)
node = self.tree.insert('', str(pos), values=(runid, get_name(
uid), get_problem_color(pid), submit_time, status))
self.runid_to_node[runid] = node
def button_switch(self):
self.number = self.right_top_entry.get()
runid = int(self.right_top_entry.get())
if not runid in self.runid_to_node:
return
self.tree.delete(self.runid_to_node[runid])
uid = self.runid_to_uid[runid]
pid = self.runid_to_pid[runid]
status_before = R.hget(get_status_key(uid, pid), STATUS_FIELD)
submit_time = R.hget(get_status_key(uid, pid), SUBMIT_TIME_FIELD)
if status_before == STATUS_WAIT:
status = STATUS_FINISHED
R.hset(get_status_key(uid, pid), STATUS_FIELD, STATUS_FINISHED)
else:
status = STATUS_WAIT
R.hset(get_status_key(uid, pid), STATUS_FIELD, STATUS_WAIT)
if status == STATUS_FINISHED:
pos = lower_bound(self.unfinished_runid, runid)
self.unfinished_runid.pop(pos)
pos = 'end'
else:
pos = lower_bound(self.unfinished_runid, runid)
self.unfinished_runid.insert(pos, runid)
node = self.tree.insert('', str(pos), values=(runid, get_name(uid),
get_problem_color(pid), submit_time, status))
if status == STATUS_WAIT:
self.var_wait.set(int(self.var_wait.get()) + 1)
self.var_finish.set(int(self.var_finish.get()) - 1)
else:
self.var_wait.set(int(self.var_wait.get()) - 1)
self.var_finish.set(int(self.var_finish.get()) + 1)
R.hset(get_status_key(uid, pid), STATUS_FIELD, status)
self.runid_to_node[runid] = node
def listen(self):
while True:
msg = R.blpop(QUEUE_NAME, 0)[1]
R.rpush(BACKUP_QUEUE_NAME, msg)
bak = msg.split('_')
uid = int(bak[0])
pid = int(bak[1])
runid = int(bak[2])
self.runid_to_uid[runid] = uid
self.runid_to_pid[runid] = pid
if R.hget(get_status_key(uid, pid), RUNID_FIELD) == None:
R.hset(get_status_key(uid, pid), RUNID_FIELD, runid)
status = STATUS_WAIT
R.hset(get_status_key(uid, pid), STATUS_FIELD, status)
submit_time = time.ctime()
R.hset(get_status_key(uid, pid), SUBMIT_TIME_FIELD, submit_time
)
self.have_uid_pid.add('%d_%d' % (uid, pid))
elif '%d_%d' % (uid, pid) in self.have_uid_pid:
continue
else:
status = R.hget(get_status_key(uid, pid), STATUS_FIELD)
submit_time = R.hget(get_status_key(uid, pid),
SUBMIT_TIME_FIELD)
self.have_uid_pid.add('%d_%d' % (uid, pid))
if status == STATUS_FINISHED:
self.var_finish.set(int(self.var_finish.get()) + 1)
pos = 'end'
else:
self.var_wait.set(int(self.var_wait.get()) + 1)
pos = lower_bound(self.unfinished_runid, runid)
self.unfinished_runid.insert(pos, runid)
node = self.tree.insert('', str(pos), values=(runid, get_name(
uid), get_problem_color(pid), submit_time, status))
self.runid_to_node[runid] = node
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='NENU-OJ Ballon')
parser.add_argument('--namefile', dest='namefile', required=True, type=
str, help='such as namefile.json')
parser.add_argument('--problemfile', dest='problemfile', required=True,
type=str, help='such as problemfile.json')
parser.add_argument('--redishost', dest='redishost', required=True,
type=str, help='such as 127.0.0.1')
parser.add_argument('--redisport', dest='redisport', required=True,
type=int, help='such as 6379')
parser.add_argument('--contestid', dest='contestid', required=True,
type=int, help='such as 9')
args = parser.parse_args()
R = redis.Redis(host=args.redishost, port=args.redisport)
CONTEST_ID = args.contestid
with open(args.namefile) as f:
NAME = json.loads(f.read())
with open(args.problemfile) as f:
PROBLEM_NAME = json.loads(f.read())
QUEUE_NAME = 'ballon_%d' % CONTEST_ID
BACKUP_QUEUE_NAME = 'ballon_bak_%d' % CONTEST_ID
PrinterTkinter()
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
import argparse
import redis
from Tkinter import *
import ttk
import json
import time
import thread
R = None
NAME = {}
PROBLEM_NAME = {}
CONTEST_ID = None
QUEUE_NAME = None
BACKUP_QUEUE_NAME = None
RUNID_FIELD = "runid"
SUBMIT_TIME_FIELD = "submit_time"
STATUS_FIELD = "status"
STATUS_FINISHED = "finished"
STATUS_WAIT = "wait"
def lower_bound(arr, key):
left = 0
right = len(arr) - 1
res = len(arr)
while left <= right:
mid = (left + right) >> 1
if arr[mid] >= key:
res = mid
right = mid - 1
else:
left = mid + 1
return res
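# Editorial note (sketch): lower_bound returns the first index whose element is >= key,
# i.e. on a sorted list it behaves like the standard library's bisect.bisect_left;
# for example lower_bound([1, 3, 5], 3) == 1 == bisect.bisect_left([1, 3, 5], 3).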
def get_status_key(user_id, pid):
return "status_%d_%d" % (user_id, pid)
def get_name(user_id):
user_id = str(user_id)
if user_id in NAME:
return NAME[user_id]
else:
return "user: %s" % user_id
def get_problem_color(pid):
pid = str(pid)
if pid in PROBLEM_NAME:
return PROBLEM_NAME[pid]
else:
return str(pid)
class PrinterTkinter:
def __init__(self):
self.root = Tk()
self.root.title("气球发放")
self.runid_to_node = dict()
self.runid_to_uid = dict()
self.runid_to_pid = dict()
self.have_uid_pid = set()
self.unfinished_runid = []
self.frame_left_top = Frame(width=400, height=200)
self.frame_right_top = Frame(width=400, height=200)
self.frame_center = Frame(width=800, height=400)
self.frame_bottom = Frame(width=800, height=50)
        # Upper-left area: dispatch status counters
self.left_top_title = Label(self.frame_left_top, text="发放状态:", font=('Arial', 25))
self.left_top_title.grid(row=0, column=0, columnspan=2, sticky=NSEW, padx=50, pady=30)
self.var_finish = StringVar()
self.var_wait = StringVar()
self.left_top_frame = Frame(self.frame_left_top)
self.left_top_frame_left1 = Label(self.frame_left_top, text="已发放", font=('Arial', 20))
self.left_top_frame_left2 = Label(self.frame_left_top, textvariable=self.var_finish, font=('Arial', 15))
self.var_finish.set(0)
self.left_top_frame_right1 = Label(self.frame_left_top, text="未发放", font=('Arial', 20))
self.left_top_frame_right2 = Label(self.frame_left_top, textvariable=self.var_wait, font=('Arial', 15))
self.var_wait.set(0)
self.left_top_frame_left1.grid(row=1, column=0)
self.left_top_frame_left2.grid(row=1, column=1)
self.left_top_frame_right1.grid(row=2, column=0)
self.left_top_frame_right2.grid(row=2, column=1)
        # Upper-right area: toggle a run's status by runid
self.var_entry = StringVar()
self.right_top_title = Label(self.frame_right_top, text="切换状态(输入runid):", font=('Arial', 20))
self.right_top_entry = Entry(self.frame_right_top, textvariable=self.var_entry)
self.number = int
self.right_top_button = Button(self.frame_right_top, text="确定", command=self.button_switch, font=('Arial', 15))
self.right_top_title.grid(row=0, column=0)
self.right_top_entry.grid(row=1, column=0)
self.right_top_button.grid(row=2, column=0, padx=20, pady=20)
        # Central area: the submission list
self.tree = ttk.Treeview(self.frame_center, show="headings", height=18, columns=("a", "b", "c", "d", "e"))
self.vbar = ttk.Scrollbar(self.frame_center, orient=VERTICAL, command=self.tree.yview)
        # Tie the treeview to its scrollbar
self.tree.configure(yscrollcommand=self.vbar.set)
        # Table column headings
self.tree.column("a", width=50, anchor="center")
self.tree.column("b", width=150, anchor="center")
self.tree.column("c", width=150, anchor="center")
self.tree.column("d", width=200, anchor="center")
self.tree.column("e", width=150, anchor="center")
self.tree.heading("a", text="Runid")
self.tree.heading("b", text="User")
self.tree.heading("c", text="Problem")
self.tree.heading("d", text="Time")
self.tree.heading("e", text="Status")
        # Populate the table contents via get_tree()
self.get_tree()
self.tree.grid(row=0, column=0, sticky=NSEW)
self.vbar.grid(row=0, column=1, sticky=NS)
        # Overall placement of the regions
self.frame_left_top.grid(row=0, column=0, padx=2, pady=5)
self.frame_right_top.grid(row=0, column=1, padx=30, pady=30)
self.frame_center.grid(row=1, column=0, columnspan=2, padx=4, pady=5)
self.frame_bottom.grid(row=2, column=0, columnspan=2)
self.frame_left_top.grid_propagate(0)
self.frame_right_top.grid_propagate(0)
self.frame_center.grid_propagate(0)
self.frame_bottom.grid_propagate(0)
thread.start_new_thread(self.listen, ())
self.root.mainloop()
    # Populate the table from the backup queue
def get_tree(self):
bak_list = R.lrange(BACKUP_QUEUE_NAME, 0, -1)
for bak in bak_list:
bak = bak.split('_')
uid = int(bak[0])
pid = int(bak[1])
runid = int(bak[2])
self.runid_to_uid[runid] = uid
self.runid_to_pid[runid] = pid
            if R.hget(get_status_key(uid, pid), RUNID_FIELD) is None:
R.hset(get_status_key(uid, pid), RUNID_FIELD, runid)
status = STATUS_WAIT
R.hset(get_status_key(uid, pid), STATUS_FIELD, status)
submit_time = time.ctime()
R.hset(get_status_key(uid, pid), SUBMIT_TIME_FIELD, submit_time)
self.have_uid_pid.add("%d_%d" % (uid, pid))
elif "%d_%d" % (uid, pid) in self.have_uid_pid:
continue
else:
status = R.hget(get_status_key(uid, pid), STATUS_FIELD)
submit_time = R.hget(get_status_key(uid, pid), SUBMIT_TIME_FIELD)
self.have_uid_pid.add("%d_%d" % (uid, pid))
if status == STATUS_FINISHED:
self.var_finish.set(int(self.var_finish.get()) + 1)
pos = "end"
else:
self.var_wait.set(int(self.var_wait.get()) + 1)
pos = lower_bound(self.unfinished_runid, runid)
self.unfinished_runid.insert(pos, runid)
node = self.tree.insert("", str(pos), values=(runid, get_name(uid), get_problem_color(pid), submit_time, status))
self.runid_to_node[runid] = node
def button_switch(self):
self.number = self.right_top_entry.get()
runid = int(self.right_top_entry.get())
        if runid not in self.runid_to_node:
return
self.tree.delete(self.runid_to_node[runid])
uid = self.runid_to_uid[runid]
pid = self.runid_to_pid[runid]
status_before = R.hget(get_status_key(uid, pid), STATUS_FIELD)
submit_time = R.hget(get_status_key(uid, pid), SUBMIT_TIME_FIELD)
if status_before == STATUS_WAIT:
status = STATUS_FINISHED
R.hset(get_status_key(uid, pid), STATUS_FIELD, STATUS_FINISHED)
else:
status = STATUS_WAIT
R.hset(get_status_key(uid, pid), STATUS_FIELD, STATUS_WAIT)
if status == STATUS_FINISHED:
pos = lower_bound(self.unfinished_runid, runid)
self.unfinished_runid.pop(pos)
pos = "end"
else:
pos = lower_bound(self.unfinished_runid, runid)
self.unfinished_runid.insert(pos, runid)
node = self.tree.insert("", str(pos), values=(runid, get_name(uid), get_problem_color(pid), submit_time, status))
if status == STATUS_WAIT:
self.var_wait.set(int(self.var_wait.get()) + 1)
self.var_finish.set(int(self.var_finish.get()) - 1)
else:
self.var_wait.set(int(self.var_wait.get()) - 1)
self.var_finish.set(int(self.var_finish.get()) + 1)
R.hset(get_status_key(uid, pid), STATUS_FIELD, status)
self.runid_to_node[runid] = node
def listen(self):
while True:
msg = R.blpop(QUEUE_NAME, 0)[1]
R.rpush(BACKUP_QUEUE_NAME, msg)
bak = msg.split('_')
uid = int(bak[0])
pid = int(bak[1])
runid = int(bak[2])
self.runid_to_uid[runid] = uid
self.runid_to_pid[runid] = pid
            if R.hget(get_status_key(uid, pid), RUNID_FIELD) is None:
R.hset(get_status_key(uid, pid), RUNID_FIELD, runid)
status = STATUS_WAIT
R.hset(get_status_key(uid, pid), STATUS_FIELD, status)
submit_time = time.ctime()
R.hset(get_status_key(uid, pid), SUBMIT_TIME_FIELD, submit_time)
self.have_uid_pid.add("%d_%d" % (uid, pid))
elif "%d_%d" % (uid, pid) in self.have_uid_pid:
continue
else:
status = R.hget(get_status_key(uid, pid), STATUS_FIELD)
submit_time = R.hget(get_status_key(uid, pid), SUBMIT_TIME_FIELD)
self.have_uid_pid.add("%d_%d" % (uid, pid))
if status == STATUS_FINISHED:
self.var_finish.set(int(self.var_finish.get()) + 1)
pos = "end"
else:
self.var_wait.set(int(self.var_wait.get()) + 1)
pos = lower_bound(self.unfinished_runid, runid)
self.unfinished_runid.insert(pos, runid)
node = self.tree.insert("", str(pos),
values=(runid, get_name(uid), get_problem_color(pid), submit_time, status))
self.runid_to_node[runid] = node
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='NENU-OJ Ballon')
parser.add_argument('--namefile', dest='namefile', required=True, type=str, help='such as namefile.json')
parser.add_argument('--problemfile', dest='problemfile', required=True, type=str, help='such as problemfile.json')
parser.add_argument('--redishost', dest='redishost', required=True, type=str, help='such as 127.0.0.1')
parser.add_argument('--redisport', dest='redisport', required=True, type=int, help='such as 6379')
parser.add_argument('--contestid', dest='contestid', required=True, type=int, help='such as 9')
args = parser.parse_args()
R = redis.Redis(host=args.redishost, port=args.redisport)
CONTEST_ID = args.contestid
with open(args.namefile) as f:
NAME = json.loads(f.read())
with open(args.problemfile) as f:
PROBLEM_NAME = json.loads(f.read())
QUEUE_NAME = "ballon_%d" % CONTEST_ID
BACKUP_QUEUE_NAME = "ballon_bak_%d" % CONTEST_ID
PrinterTkinter()
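A short usage sketch, not part of the original script: the listener blpops '<uid>_<pid>_<runid>' strings from the ballon_<contestid> list, which is exactly how get_tree() and listen() split them, so a judge-side producer can simply rpush onto that list. All host names, ids, file names and the script name below are placeholders.

# Producer side (placeholder values); the message format '<uid>_<pid>_<runid>'
# matches the split('_') parsing in get_tree()/listen() above.
import redis

r = redis.Redis(host='127.0.0.1', port=6379)
r.rpush('ballon_9', '%d_%d_%d' % (1001, 2, 345))  # user 1001, problem 2, run id 345

# Launching the board (namefile.json maps user ids to names, problemfile.json
# maps problem ids to balloon colours, per get_name()/get_problem_color()):
#   python balloon_board.py --namefile namefile.json --problemfile problemfile.json \
#       --redishost 127.0.0.1 --redisport 6379 --contestid 9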
|
flexible
|
{
"blob_id": "76e1f811d06af0e6e83ae989a236a5cd22c55e01",
"index": 2985,
"step-1": "<mask token>\n\n\nclass PrinterTkinter:\n\n def __init__(self):\n self.root = Tk()\n self.root.title('气球发放')\n self.runid_to_node = dict()\n self.runid_to_uid = dict()\n self.runid_to_pid = dict()\n self.have_uid_pid = set()\n self.unfinished_runid = []\n self.frame_left_top = Frame(width=400, height=200)\n self.frame_right_top = Frame(width=400, height=200)\n self.frame_center = Frame(width=800, height=400)\n self.frame_bottom = Frame(width=800, height=50)\n self.left_top_title = Label(self.frame_left_top, text='发放状态:', font\n =('Arial', 25))\n self.left_top_title.grid(row=0, column=0, columnspan=2, sticky=NSEW,\n padx=50, pady=30)\n self.var_finish = StringVar()\n self.var_wait = StringVar()\n self.left_top_frame = Frame(self.frame_left_top)\n self.left_top_frame_left1 = Label(self.frame_left_top, text='已发放',\n font=('Arial', 20))\n self.left_top_frame_left2 = Label(self.frame_left_top, textvariable\n =self.var_finish, font=('Arial', 15))\n self.var_finish.set(0)\n self.left_top_frame_right1 = Label(self.frame_left_top, text='未发放',\n font=('Arial', 20))\n self.left_top_frame_right2 = Label(self.frame_left_top,\n textvariable=self.var_wait, font=('Arial', 15))\n self.var_wait.set(0)\n self.left_top_frame_left1.grid(row=1, column=0)\n self.left_top_frame_left2.grid(row=1, column=1)\n self.left_top_frame_right1.grid(row=2, column=0)\n self.left_top_frame_right2.grid(row=2, column=1)\n self.var_entry = StringVar()\n self.right_top_title = Label(self.frame_right_top, text=\n '切换状态(输入runid):', font=('Arial', 20))\n self.right_top_entry = Entry(self.frame_right_top, textvariable=\n self.var_entry)\n self.number = int\n self.right_top_button = Button(self.frame_right_top, text='确定',\n command=self.button_switch, font=('Arial', 15))\n self.right_top_title.grid(row=0, column=0)\n self.right_top_entry.grid(row=1, column=0)\n self.right_top_button.grid(row=2, column=0, padx=20, pady=20)\n self.tree = ttk.Treeview(self.frame_center, show='headings', height\n =18, columns=('a', 'b', 'c', 'd', 'e'))\n self.vbar = ttk.Scrollbar(self.frame_center, orient=VERTICAL,\n command=self.tree.yview)\n self.tree.configure(yscrollcommand=self.vbar.set)\n self.tree.column('a', width=50, anchor='center')\n self.tree.column('b', width=150, anchor='center')\n self.tree.column('c', width=150, anchor='center')\n self.tree.column('d', width=200, anchor='center')\n self.tree.column('e', width=150, anchor='center')\n self.tree.heading('a', text='Runid')\n self.tree.heading('b', text='User')\n self.tree.heading('c', text='Problem')\n self.tree.heading('d', text='Time')\n self.tree.heading('e', text='Status')\n self.get_tree()\n self.tree.grid(row=0, column=0, sticky=NSEW)\n self.vbar.grid(row=0, column=1, sticky=NS)\n self.frame_left_top.grid(row=0, column=0, padx=2, pady=5)\n self.frame_right_top.grid(row=0, column=1, padx=30, pady=30)\n self.frame_center.grid(row=1, column=0, columnspan=2, padx=4, pady=5)\n self.frame_bottom.grid(row=2, column=0, columnspan=2)\n self.frame_left_top.grid_propagate(0)\n self.frame_right_top.grid_propagate(0)\n self.frame_center.grid_propagate(0)\n self.frame_bottom.grid_propagate(0)\n thread.start_new_thread(self.listen, ())\n self.root.mainloop()\n\n def get_tree(self):\n bak_list = R.lrange(BACKUP_QUEUE_NAME, 0, -1)\n for bak in bak_list:\n bak = bak.split('_')\n uid = int(bak[0])\n pid = int(bak[1])\n runid = int(bak[2])\n self.runid_to_uid[runid] = uid\n self.runid_to_pid[runid] = pid\n if R.hget(get_status_key(uid, pid), RUNID_FIELD) == None:\n R.hset(get_status_key(uid, 
pid), RUNID_FIELD, runid)\n status = STATUS_WAIT\n R.hset(get_status_key(uid, pid), STATUS_FIELD, status)\n submit_time = time.ctime()\n R.hset(get_status_key(uid, pid), SUBMIT_TIME_FIELD, submit_time\n )\n self.have_uid_pid.add('%d_%d' % (uid, pid))\n elif '%d_%d' % (uid, pid) in self.have_uid_pid:\n continue\n else:\n status = R.hget(get_status_key(uid, pid), STATUS_FIELD)\n submit_time = R.hget(get_status_key(uid, pid),\n SUBMIT_TIME_FIELD)\n self.have_uid_pid.add('%d_%d' % (uid, pid))\n if status == STATUS_FINISHED:\n self.var_finish.set(int(self.var_finish.get()) + 1)\n pos = 'end'\n else:\n self.var_wait.set(int(self.var_wait.get()) + 1)\n pos = lower_bound(self.unfinished_runid, runid)\n self.unfinished_runid.insert(pos, runid)\n node = self.tree.insert('', str(pos), values=(runid, get_name(\n uid), get_problem_color(pid), submit_time, status))\n self.runid_to_node[runid] = node\n\n def button_switch(self):\n self.number = self.right_top_entry.get()\n runid = int(self.right_top_entry.get())\n if not runid in self.runid_to_node:\n return\n self.tree.delete(self.runid_to_node[runid])\n uid = self.runid_to_uid[runid]\n pid = self.runid_to_pid[runid]\n status_before = R.hget(get_status_key(uid, pid), STATUS_FIELD)\n submit_time = R.hget(get_status_key(uid, pid), SUBMIT_TIME_FIELD)\n if status_before == STATUS_WAIT:\n status = STATUS_FINISHED\n R.hset(get_status_key(uid, pid), STATUS_FIELD, STATUS_FINISHED)\n else:\n status = STATUS_WAIT\n R.hset(get_status_key(uid, pid), STATUS_FIELD, STATUS_WAIT)\n if status == STATUS_FINISHED:\n pos = lower_bound(self.unfinished_runid, runid)\n self.unfinished_runid.pop(pos)\n pos = 'end'\n else:\n pos = lower_bound(self.unfinished_runid, runid)\n self.unfinished_runid.insert(pos, runid)\n node = self.tree.insert('', str(pos), values=(runid, get_name(uid),\n get_problem_color(pid), submit_time, status))\n if status == STATUS_WAIT:\n self.var_wait.set(int(self.var_wait.get()) + 1)\n self.var_finish.set(int(self.var_finish.get()) - 1)\n else:\n self.var_wait.set(int(self.var_wait.get()) - 1)\n self.var_finish.set(int(self.var_finish.get()) + 1)\n R.hset(get_status_key(uid, pid), STATUS_FIELD, status)\n self.runid_to_node[runid] = node\n\n def listen(self):\n while True:\n msg = R.blpop(QUEUE_NAME, 0)[1]\n R.rpush(BACKUP_QUEUE_NAME, msg)\n bak = msg.split('_')\n uid = int(bak[0])\n pid = int(bak[1])\n runid = int(bak[2])\n self.runid_to_uid[runid] = uid\n self.runid_to_pid[runid] = pid\n if R.hget(get_status_key(uid, pid), RUNID_FIELD) == None:\n R.hset(get_status_key(uid, pid), RUNID_FIELD, runid)\n status = STATUS_WAIT\n R.hset(get_status_key(uid, pid), STATUS_FIELD, status)\n submit_time = time.ctime()\n R.hset(get_status_key(uid, pid), SUBMIT_TIME_FIELD, submit_time\n )\n self.have_uid_pid.add('%d_%d' % (uid, pid))\n elif '%d_%d' % (uid, pid) in self.have_uid_pid:\n continue\n else:\n status = R.hget(get_status_key(uid, pid), STATUS_FIELD)\n submit_time = R.hget(get_status_key(uid, pid),\n SUBMIT_TIME_FIELD)\n self.have_uid_pid.add('%d_%d' % (uid, pid))\n if status == STATUS_FINISHED:\n self.var_finish.set(int(self.var_finish.get()) + 1)\n pos = 'end'\n else:\n self.var_wait.set(int(self.var_wait.get()) + 1)\n pos = lower_bound(self.unfinished_runid, runid)\n self.unfinished_runid.insert(pos, runid)\n node = self.tree.insert('', str(pos), values=(runid, get_name(\n uid), get_problem_color(pid), submit_time, status))\n self.runid_to_node[runid] = node\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef lower_bound(arr, key):\n left = 0\n right = len(arr) - 1\n res = len(arr)\n while left <= right:\n mid = left + right >> 1\n if arr[mid] >= key:\n res = mid\n right = mid - 1\n else:\n left = mid + 1\n return res\n\n\ndef get_status_key(user_id, pid):\n return 'status_%d_%d' % (user_id, pid)\n\n\ndef get_name(user_id):\n user_id = str(user_id)\n if user_id in NAME:\n return NAME[user_id]\n else:\n return 'user: %s' % user_id\n\n\n<mask token>\n\n\nclass PrinterTkinter:\n\n def __init__(self):\n self.root = Tk()\n self.root.title('气球发放')\n self.runid_to_node = dict()\n self.runid_to_uid = dict()\n self.runid_to_pid = dict()\n self.have_uid_pid = set()\n self.unfinished_runid = []\n self.frame_left_top = Frame(width=400, height=200)\n self.frame_right_top = Frame(width=400, height=200)\n self.frame_center = Frame(width=800, height=400)\n self.frame_bottom = Frame(width=800, height=50)\n self.left_top_title = Label(self.frame_left_top, text='发放状态:', font\n =('Arial', 25))\n self.left_top_title.grid(row=0, column=0, columnspan=2, sticky=NSEW,\n padx=50, pady=30)\n self.var_finish = StringVar()\n self.var_wait = StringVar()\n self.left_top_frame = Frame(self.frame_left_top)\n self.left_top_frame_left1 = Label(self.frame_left_top, text='已发放',\n font=('Arial', 20))\n self.left_top_frame_left2 = Label(self.frame_left_top, textvariable\n =self.var_finish, font=('Arial', 15))\n self.var_finish.set(0)\n self.left_top_frame_right1 = Label(self.frame_left_top, text='未发放',\n font=('Arial', 20))\n self.left_top_frame_right2 = Label(self.frame_left_top,\n textvariable=self.var_wait, font=('Arial', 15))\n self.var_wait.set(0)\n self.left_top_frame_left1.grid(row=1, column=0)\n self.left_top_frame_left2.grid(row=1, column=1)\n self.left_top_frame_right1.grid(row=2, column=0)\n self.left_top_frame_right2.grid(row=2, column=1)\n self.var_entry = StringVar()\n self.right_top_title = Label(self.frame_right_top, text=\n '切换状态(输入runid):', font=('Arial', 20))\n self.right_top_entry = Entry(self.frame_right_top, textvariable=\n self.var_entry)\n self.number = int\n self.right_top_button = Button(self.frame_right_top, text='确定',\n command=self.button_switch, font=('Arial', 15))\n self.right_top_title.grid(row=0, column=0)\n self.right_top_entry.grid(row=1, column=0)\n self.right_top_button.grid(row=2, column=0, padx=20, pady=20)\n self.tree = ttk.Treeview(self.frame_center, show='headings', height\n =18, columns=('a', 'b', 'c', 'd', 'e'))\n self.vbar = ttk.Scrollbar(self.frame_center, orient=VERTICAL,\n command=self.tree.yview)\n self.tree.configure(yscrollcommand=self.vbar.set)\n self.tree.column('a', width=50, anchor='center')\n self.tree.column('b', width=150, anchor='center')\n self.tree.column('c', width=150, anchor='center')\n self.tree.column('d', width=200, anchor='center')\n self.tree.column('e', width=150, anchor='center')\n self.tree.heading('a', text='Runid')\n self.tree.heading('b', text='User')\n self.tree.heading('c', text='Problem')\n self.tree.heading('d', text='Time')\n self.tree.heading('e', text='Status')\n self.get_tree()\n self.tree.grid(row=0, column=0, sticky=NSEW)\n self.vbar.grid(row=0, column=1, sticky=NS)\n self.frame_left_top.grid(row=0, column=0, padx=2, pady=5)\n self.frame_right_top.grid(row=0, column=1, padx=30, pady=30)\n self.frame_center.grid(row=1, column=0, columnspan=2, padx=4, pady=5)\n self.frame_bottom.grid(row=2, column=0, columnspan=2)\n self.frame_left_top.grid_propagate(0)\n self.frame_right_top.grid_propagate(0)\n 
self.frame_center.grid_propagate(0)\n self.frame_bottom.grid_propagate(0)\n thread.start_new_thread(self.listen, ())\n self.root.mainloop()\n\n def get_tree(self):\n bak_list = R.lrange(BACKUP_QUEUE_NAME, 0, -1)\n for bak in bak_list:\n bak = bak.split('_')\n uid = int(bak[0])\n pid = int(bak[1])\n runid = int(bak[2])\n self.runid_to_uid[runid] = uid\n self.runid_to_pid[runid] = pid\n if R.hget(get_status_key(uid, pid), RUNID_FIELD) == None:\n R.hset(get_status_key(uid, pid), RUNID_FIELD, runid)\n status = STATUS_WAIT\n R.hset(get_status_key(uid, pid), STATUS_FIELD, status)\n submit_time = time.ctime()\n R.hset(get_status_key(uid, pid), SUBMIT_TIME_FIELD, submit_time\n )\n self.have_uid_pid.add('%d_%d' % (uid, pid))\n elif '%d_%d' % (uid, pid) in self.have_uid_pid:\n continue\n else:\n status = R.hget(get_status_key(uid, pid), STATUS_FIELD)\n submit_time = R.hget(get_status_key(uid, pid),\n SUBMIT_TIME_FIELD)\n self.have_uid_pid.add('%d_%d' % (uid, pid))\n if status == STATUS_FINISHED:\n self.var_finish.set(int(self.var_finish.get()) + 1)\n pos = 'end'\n else:\n self.var_wait.set(int(self.var_wait.get()) + 1)\n pos = lower_bound(self.unfinished_runid, runid)\n self.unfinished_runid.insert(pos, runid)\n node = self.tree.insert('', str(pos), values=(runid, get_name(\n uid), get_problem_color(pid), submit_time, status))\n self.runid_to_node[runid] = node\n\n def button_switch(self):\n self.number = self.right_top_entry.get()\n runid = int(self.right_top_entry.get())\n if not runid in self.runid_to_node:\n return\n self.tree.delete(self.runid_to_node[runid])\n uid = self.runid_to_uid[runid]\n pid = self.runid_to_pid[runid]\n status_before = R.hget(get_status_key(uid, pid), STATUS_FIELD)\n submit_time = R.hget(get_status_key(uid, pid), SUBMIT_TIME_FIELD)\n if status_before == STATUS_WAIT:\n status = STATUS_FINISHED\n R.hset(get_status_key(uid, pid), STATUS_FIELD, STATUS_FINISHED)\n else:\n status = STATUS_WAIT\n R.hset(get_status_key(uid, pid), STATUS_FIELD, STATUS_WAIT)\n if status == STATUS_FINISHED:\n pos = lower_bound(self.unfinished_runid, runid)\n self.unfinished_runid.pop(pos)\n pos = 'end'\n else:\n pos = lower_bound(self.unfinished_runid, runid)\n self.unfinished_runid.insert(pos, runid)\n node = self.tree.insert('', str(pos), values=(runid, get_name(uid),\n get_problem_color(pid), submit_time, status))\n if status == STATUS_WAIT:\n self.var_wait.set(int(self.var_wait.get()) + 1)\n self.var_finish.set(int(self.var_finish.get()) - 1)\n else:\n self.var_wait.set(int(self.var_wait.get()) - 1)\n self.var_finish.set(int(self.var_finish.get()) + 1)\n R.hset(get_status_key(uid, pid), STATUS_FIELD, status)\n self.runid_to_node[runid] = node\n\n def listen(self):\n while True:\n msg = R.blpop(QUEUE_NAME, 0)[1]\n R.rpush(BACKUP_QUEUE_NAME, msg)\n bak = msg.split('_')\n uid = int(bak[0])\n pid = int(bak[1])\n runid = int(bak[2])\n self.runid_to_uid[runid] = uid\n self.runid_to_pid[runid] = pid\n if R.hget(get_status_key(uid, pid), RUNID_FIELD) == None:\n R.hset(get_status_key(uid, pid), RUNID_FIELD, runid)\n status = STATUS_WAIT\n R.hset(get_status_key(uid, pid), STATUS_FIELD, status)\n submit_time = time.ctime()\n R.hset(get_status_key(uid, pid), SUBMIT_TIME_FIELD, submit_time\n )\n self.have_uid_pid.add('%d_%d' % (uid, pid))\n elif '%d_%d' % (uid, pid) in self.have_uid_pid:\n continue\n else:\n status = R.hget(get_status_key(uid, pid), STATUS_FIELD)\n submit_time = R.hget(get_status_key(uid, pid),\n SUBMIT_TIME_FIELD)\n self.have_uid_pid.add('%d_%d' % (uid, pid))\n if status == 
STATUS_FINISHED:\n self.var_finish.set(int(self.var_finish.get()) + 1)\n pos = 'end'\n else:\n self.var_wait.set(int(self.var_wait.get()) + 1)\n pos = lower_bound(self.unfinished_runid, runid)\n self.unfinished_runid.insert(pos, runid)\n node = self.tree.insert('', str(pos), values=(runid, get_name(\n uid), get_problem_color(pid), submit_time, status))\n self.runid_to_node[runid] = node\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef lower_bound(arr, key):\n left = 0\n right = len(arr) - 1\n res = len(arr)\n while left <= right:\n mid = left + right >> 1\n if arr[mid] >= key:\n res = mid\n right = mid - 1\n else:\n left = mid + 1\n return res\n\n\ndef get_status_key(user_id, pid):\n return 'status_%d_%d' % (user_id, pid)\n\n\ndef get_name(user_id):\n user_id = str(user_id)\n if user_id in NAME:\n return NAME[user_id]\n else:\n return 'user: %s' % user_id\n\n\ndef get_problem_color(pid):\n pid = str(pid)\n if pid in PROBLEM_NAME:\n return PROBLEM_NAME[pid]\n else:\n return str(pid)\n\n\nclass PrinterTkinter:\n\n def __init__(self):\n self.root = Tk()\n self.root.title('气球发放')\n self.runid_to_node = dict()\n self.runid_to_uid = dict()\n self.runid_to_pid = dict()\n self.have_uid_pid = set()\n self.unfinished_runid = []\n self.frame_left_top = Frame(width=400, height=200)\n self.frame_right_top = Frame(width=400, height=200)\n self.frame_center = Frame(width=800, height=400)\n self.frame_bottom = Frame(width=800, height=50)\n self.left_top_title = Label(self.frame_left_top, text='发放状态:', font\n =('Arial', 25))\n self.left_top_title.grid(row=0, column=0, columnspan=2, sticky=NSEW,\n padx=50, pady=30)\n self.var_finish = StringVar()\n self.var_wait = StringVar()\n self.left_top_frame = Frame(self.frame_left_top)\n self.left_top_frame_left1 = Label(self.frame_left_top, text='已发放',\n font=('Arial', 20))\n self.left_top_frame_left2 = Label(self.frame_left_top, textvariable\n =self.var_finish, font=('Arial', 15))\n self.var_finish.set(0)\n self.left_top_frame_right1 = Label(self.frame_left_top, text='未发放',\n font=('Arial', 20))\n self.left_top_frame_right2 = Label(self.frame_left_top,\n textvariable=self.var_wait, font=('Arial', 15))\n self.var_wait.set(0)\n self.left_top_frame_left1.grid(row=1, column=0)\n self.left_top_frame_left2.grid(row=1, column=1)\n self.left_top_frame_right1.grid(row=2, column=0)\n self.left_top_frame_right2.grid(row=2, column=1)\n self.var_entry = StringVar()\n self.right_top_title = Label(self.frame_right_top, text=\n '切换状态(输入runid):', font=('Arial', 20))\n self.right_top_entry = Entry(self.frame_right_top, textvariable=\n self.var_entry)\n self.number = int\n self.right_top_button = Button(self.frame_right_top, text='确定',\n command=self.button_switch, font=('Arial', 15))\n self.right_top_title.grid(row=0, column=0)\n self.right_top_entry.grid(row=1, column=0)\n self.right_top_button.grid(row=2, column=0, padx=20, pady=20)\n self.tree = ttk.Treeview(self.frame_center, show='headings', height\n =18, columns=('a', 'b', 'c', 'd', 'e'))\n self.vbar = ttk.Scrollbar(self.frame_center, orient=VERTICAL,\n command=self.tree.yview)\n self.tree.configure(yscrollcommand=self.vbar.set)\n self.tree.column('a', width=50, anchor='center')\n self.tree.column('b', width=150, anchor='center')\n self.tree.column('c', width=150, anchor='center')\n self.tree.column('d', width=200, anchor='center')\n self.tree.column('e', width=150, anchor='center')\n self.tree.heading('a', text='Runid')\n self.tree.heading('b', text='User')\n self.tree.heading('c', text='Problem')\n self.tree.heading('d', text='Time')\n self.tree.heading('e', text='Status')\n self.get_tree()\n self.tree.grid(row=0, column=0, sticky=NSEW)\n self.vbar.grid(row=0, column=1, sticky=NS)\n self.frame_left_top.grid(row=0, column=0, padx=2, pady=5)\n self.frame_right_top.grid(row=0, column=1, padx=30, pady=30)\n self.frame_center.grid(row=1, column=0, columnspan=2, padx=4, pady=5)\n self.frame_bottom.grid(row=2, column=0, 
columnspan=2)\n self.frame_left_top.grid_propagate(0)\n self.frame_right_top.grid_propagate(0)\n self.frame_center.grid_propagate(0)\n self.frame_bottom.grid_propagate(0)\n thread.start_new_thread(self.listen, ())\n self.root.mainloop()\n\n def get_tree(self):\n bak_list = R.lrange(BACKUP_QUEUE_NAME, 0, -1)\n for bak in bak_list:\n bak = bak.split('_')\n uid = int(bak[0])\n pid = int(bak[1])\n runid = int(bak[2])\n self.runid_to_uid[runid] = uid\n self.runid_to_pid[runid] = pid\n if R.hget(get_status_key(uid, pid), RUNID_FIELD) == None:\n R.hset(get_status_key(uid, pid), RUNID_FIELD, runid)\n status = STATUS_WAIT\n R.hset(get_status_key(uid, pid), STATUS_FIELD, status)\n submit_time = time.ctime()\n R.hset(get_status_key(uid, pid), SUBMIT_TIME_FIELD, submit_time\n )\n self.have_uid_pid.add('%d_%d' % (uid, pid))\n elif '%d_%d' % (uid, pid) in self.have_uid_pid:\n continue\n else:\n status = R.hget(get_status_key(uid, pid), STATUS_FIELD)\n submit_time = R.hget(get_status_key(uid, pid),\n SUBMIT_TIME_FIELD)\n self.have_uid_pid.add('%d_%d' % (uid, pid))\n if status == STATUS_FINISHED:\n self.var_finish.set(int(self.var_finish.get()) + 1)\n pos = 'end'\n else:\n self.var_wait.set(int(self.var_wait.get()) + 1)\n pos = lower_bound(self.unfinished_runid, runid)\n self.unfinished_runid.insert(pos, runid)\n node = self.tree.insert('', str(pos), values=(runid, get_name(\n uid), get_problem_color(pid), submit_time, status))\n self.runid_to_node[runid] = node\n\n def button_switch(self):\n self.number = self.right_top_entry.get()\n runid = int(self.right_top_entry.get())\n if not runid in self.runid_to_node:\n return\n self.tree.delete(self.runid_to_node[runid])\n uid = self.runid_to_uid[runid]\n pid = self.runid_to_pid[runid]\n status_before = R.hget(get_status_key(uid, pid), STATUS_FIELD)\n submit_time = R.hget(get_status_key(uid, pid), SUBMIT_TIME_FIELD)\n if status_before == STATUS_WAIT:\n status = STATUS_FINISHED\n R.hset(get_status_key(uid, pid), STATUS_FIELD, STATUS_FINISHED)\n else:\n status = STATUS_WAIT\n R.hset(get_status_key(uid, pid), STATUS_FIELD, STATUS_WAIT)\n if status == STATUS_FINISHED:\n pos = lower_bound(self.unfinished_runid, runid)\n self.unfinished_runid.pop(pos)\n pos = 'end'\n else:\n pos = lower_bound(self.unfinished_runid, runid)\n self.unfinished_runid.insert(pos, runid)\n node = self.tree.insert('', str(pos), values=(runid, get_name(uid),\n get_problem_color(pid), submit_time, status))\n if status == STATUS_WAIT:\n self.var_wait.set(int(self.var_wait.get()) + 1)\n self.var_finish.set(int(self.var_finish.get()) - 1)\n else:\n self.var_wait.set(int(self.var_wait.get()) - 1)\n self.var_finish.set(int(self.var_finish.get()) + 1)\n R.hset(get_status_key(uid, pid), STATUS_FIELD, status)\n self.runid_to_node[runid] = node\n\n def listen(self):\n while True:\n msg = R.blpop(QUEUE_NAME, 0)[1]\n R.rpush(BACKUP_QUEUE_NAME, msg)\n bak = msg.split('_')\n uid = int(bak[0])\n pid = int(bak[1])\n runid = int(bak[2])\n self.runid_to_uid[runid] = uid\n self.runid_to_pid[runid] = pid\n if R.hget(get_status_key(uid, pid), RUNID_FIELD) == None:\n R.hset(get_status_key(uid, pid), RUNID_FIELD, runid)\n status = STATUS_WAIT\n R.hset(get_status_key(uid, pid), STATUS_FIELD, status)\n submit_time = time.ctime()\n R.hset(get_status_key(uid, pid), SUBMIT_TIME_FIELD, submit_time\n )\n self.have_uid_pid.add('%d_%d' % (uid, pid))\n elif '%d_%d' % (uid, pid) in self.have_uid_pid:\n continue\n else:\n status = R.hget(get_status_key(uid, pid), STATUS_FIELD)\n submit_time = R.hget(get_status_key(uid, pid),\n 
SUBMIT_TIME_FIELD)\n self.have_uid_pid.add('%d_%d' % (uid, pid))\n if status == STATUS_FINISHED:\n self.var_finish.set(int(self.var_finish.get()) + 1)\n pos = 'end'\n else:\n self.var_wait.set(int(self.var_wait.get()) + 1)\n pos = lower_bound(self.unfinished_runid, runid)\n self.unfinished_runid.insert(pos, runid)\n node = self.tree.insert('', str(pos), values=(runid, get_name(\n uid), get_problem_color(pid), submit_time, status))\n self.runid_to_node[runid] = node\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(prog='NENU-OJ Ballon')\n parser.add_argument('--namefile', dest='namefile', required=True, type=\n str, help='such as namefile.json')\n parser.add_argument('--problemfile', dest='problemfile', required=True,\n type=str, help='such as problemfile.json')\n parser.add_argument('--redishost', dest='redishost', required=True,\n type=str, help='such as 127.0.0.1')\n parser.add_argument('--redisport', dest='redisport', required=True,\n type=int, help='such as 6379')\n parser.add_argument('--contestid', dest='contestid', required=True,\n type=int, help='such as 9')\n args = parser.parse_args()\n R = redis.Redis(host=args.redishost, port=args.redisport)\n CONTEST_ID = args.contestid\n with open(args.namefile) as f:\n NAME = json.loads(f.read())\n with open(args.problemfile) as f:\n PROBLEM_NAME = json.loads(f.read())\n QUEUE_NAME = 'ballon_%d' % CONTEST_ID\n BACKUP_QUEUE_NAME = 'ballon_bak_%d' % CONTEST_ID\n PrinterTkinter()\n",
"step-4": "import argparse\nimport redis\nfrom Tkinter import *\nimport ttk\nimport json\nimport time\nimport thread\nR = None\nNAME = {}\nPROBLEM_NAME = {}\nCONTEST_ID = None\nQUEUE_NAME = None\nBACKUP_QUEUE_NAME = None\nRUNID_FIELD = 'runid'\nSUBMIT_TIME_FIELD = 'submit_time'\nSTATUS_FIELD = 'status'\nSTATUS_FINISHED = 'finished'\nSTATUS_WAIT = 'wait'\n\n\ndef lower_bound(arr, key):\n left = 0\n right = len(arr) - 1\n res = len(arr)\n while left <= right:\n mid = left + right >> 1\n if arr[mid] >= key:\n res = mid\n right = mid - 1\n else:\n left = mid + 1\n return res\n\n\ndef get_status_key(user_id, pid):\n return 'status_%d_%d' % (user_id, pid)\n\n\ndef get_name(user_id):\n user_id = str(user_id)\n if user_id in NAME:\n return NAME[user_id]\n else:\n return 'user: %s' % user_id\n\n\ndef get_problem_color(pid):\n pid = str(pid)\n if pid in PROBLEM_NAME:\n return PROBLEM_NAME[pid]\n else:\n return str(pid)\n\n\nclass PrinterTkinter:\n\n def __init__(self):\n self.root = Tk()\n self.root.title('气球发放')\n self.runid_to_node = dict()\n self.runid_to_uid = dict()\n self.runid_to_pid = dict()\n self.have_uid_pid = set()\n self.unfinished_runid = []\n self.frame_left_top = Frame(width=400, height=200)\n self.frame_right_top = Frame(width=400, height=200)\n self.frame_center = Frame(width=800, height=400)\n self.frame_bottom = Frame(width=800, height=50)\n self.left_top_title = Label(self.frame_left_top, text='发放状态:', font\n =('Arial', 25))\n self.left_top_title.grid(row=0, column=0, columnspan=2, sticky=NSEW,\n padx=50, pady=30)\n self.var_finish = StringVar()\n self.var_wait = StringVar()\n self.left_top_frame = Frame(self.frame_left_top)\n self.left_top_frame_left1 = Label(self.frame_left_top, text='已发放',\n font=('Arial', 20))\n self.left_top_frame_left2 = Label(self.frame_left_top, textvariable\n =self.var_finish, font=('Arial', 15))\n self.var_finish.set(0)\n self.left_top_frame_right1 = Label(self.frame_left_top, text='未发放',\n font=('Arial', 20))\n self.left_top_frame_right2 = Label(self.frame_left_top,\n textvariable=self.var_wait, font=('Arial', 15))\n self.var_wait.set(0)\n self.left_top_frame_left1.grid(row=1, column=0)\n self.left_top_frame_left2.grid(row=1, column=1)\n self.left_top_frame_right1.grid(row=2, column=0)\n self.left_top_frame_right2.grid(row=2, column=1)\n self.var_entry = StringVar()\n self.right_top_title = Label(self.frame_right_top, text=\n '切换状态(输入runid):', font=('Arial', 20))\n self.right_top_entry = Entry(self.frame_right_top, textvariable=\n self.var_entry)\n self.number = int\n self.right_top_button = Button(self.frame_right_top, text='确定',\n command=self.button_switch, font=('Arial', 15))\n self.right_top_title.grid(row=0, column=0)\n self.right_top_entry.grid(row=1, column=0)\n self.right_top_button.grid(row=2, column=0, padx=20, pady=20)\n self.tree = ttk.Treeview(self.frame_center, show='headings', height\n =18, columns=('a', 'b', 'c', 'd', 'e'))\n self.vbar = ttk.Scrollbar(self.frame_center, orient=VERTICAL,\n command=self.tree.yview)\n self.tree.configure(yscrollcommand=self.vbar.set)\n self.tree.column('a', width=50, anchor='center')\n self.tree.column('b', width=150, anchor='center')\n self.tree.column('c', width=150, anchor='center')\n self.tree.column('d', width=200, anchor='center')\n self.tree.column('e', width=150, anchor='center')\n self.tree.heading('a', text='Runid')\n self.tree.heading('b', text='User')\n self.tree.heading('c', text='Problem')\n self.tree.heading('d', text='Time')\n self.tree.heading('e', text='Status')\n self.get_tree()\n 
self.tree.grid(row=0, column=0, sticky=NSEW)\n self.vbar.grid(row=0, column=1, sticky=NS)\n self.frame_left_top.grid(row=0, column=0, padx=2, pady=5)\n self.frame_right_top.grid(row=0, column=1, padx=30, pady=30)\n self.frame_center.grid(row=1, column=0, columnspan=2, padx=4, pady=5)\n self.frame_bottom.grid(row=2, column=0, columnspan=2)\n self.frame_left_top.grid_propagate(0)\n self.frame_right_top.grid_propagate(0)\n self.frame_center.grid_propagate(0)\n self.frame_bottom.grid_propagate(0)\n thread.start_new_thread(self.listen, ())\n self.root.mainloop()\n\n def get_tree(self):\n bak_list = R.lrange(BACKUP_QUEUE_NAME, 0, -1)\n for bak in bak_list:\n bak = bak.split('_')\n uid = int(bak[0])\n pid = int(bak[1])\n runid = int(bak[2])\n self.runid_to_uid[runid] = uid\n self.runid_to_pid[runid] = pid\n if R.hget(get_status_key(uid, pid), RUNID_FIELD) == None:\n R.hset(get_status_key(uid, pid), RUNID_FIELD, runid)\n status = STATUS_WAIT\n R.hset(get_status_key(uid, pid), STATUS_FIELD, status)\n submit_time = time.ctime()\n R.hset(get_status_key(uid, pid), SUBMIT_TIME_FIELD, submit_time\n )\n self.have_uid_pid.add('%d_%d' % (uid, pid))\n elif '%d_%d' % (uid, pid) in self.have_uid_pid:\n continue\n else:\n status = R.hget(get_status_key(uid, pid), STATUS_FIELD)\n submit_time = R.hget(get_status_key(uid, pid),\n SUBMIT_TIME_FIELD)\n self.have_uid_pid.add('%d_%d' % (uid, pid))\n if status == STATUS_FINISHED:\n self.var_finish.set(int(self.var_finish.get()) + 1)\n pos = 'end'\n else:\n self.var_wait.set(int(self.var_wait.get()) + 1)\n pos = lower_bound(self.unfinished_runid, runid)\n self.unfinished_runid.insert(pos, runid)\n node = self.tree.insert('', str(pos), values=(runid, get_name(\n uid), get_problem_color(pid), submit_time, status))\n self.runid_to_node[runid] = node\n\n def button_switch(self):\n self.number = self.right_top_entry.get()\n runid = int(self.right_top_entry.get())\n if not runid in self.runid_to_node:\n return\n self.tree.delete(self.runid_to_node[runid])\n uid = self.runid_to_uid[runid]\n pid = self.runid_to_pid[runid]\n status_before = R.hget(get_status_key(uid, pid), STATUS_FIELD)\n submit_time = R.hget(get_status_key(uid, pid), SUBMIT_TIME_FIELD)\n if status_before == STATUS_WAIT:\n status = STATUS_FINISHED\n R.hset(get_status_key(uid, pid), STATUS_FIELD, STATUS_FINISHED)\n else:\n status = STATUS_WAIT\n R.hset(get_status_key(uid, pid), STATUS_FIELD, STATUS_WAIT)\n if status == STATUS_FINISHED:\n pos = lower_bound(self.unfinished_runid, runid)\n self.unfinished_runid.pop(pos)\n pos = 'end'\n else:\n pos = lower_bound(self.unfinished_runid, runid)\n self.unfinished_runid.insert(pos, runid)\n node = self.tree.insert('', str(pos), values=(runid, get_name(uid),\n get_problem_color(pid), submit_time, status))\n if status == STATUS_WAIT:\n self.var_wait.set(int(self.var_wait.get()) + 1)\n self.var_finish.set(int(self.var_finish.get()) - 1)\n else:\n self.var_wait.set(int(self.var_wait.get()) - 1)\n self.var_finish.set(int(self.var_finish.get()) + 1)\n R.hset(get_status_key(uid, pid), STATUS_FIELD, status)\n self.runid_to_node[runid] = node\n\n def listen(self):\n while True:\n msg = R.blpop(QUEUE_NAME, 0)[1]\n R.rpush(BACKUP_QUEUE_NAME, msg)\n bak = msg.split('_')\n uid = int(bak[0])\n pid = int(bak[1])\n runid = int(bak[2])\n self.runid_to_uid[runid] = uid\n self.runid_to_pid[runid] = pid\n if R.hget(get_status_key(uid, pid), RUNID_FIELD) == None:\n R.hset(get_status_key(uid, pid), RUNID_FIELD, runid)\n status = STATUS_WAIT\n R.hset(get_status_key(uid, pid), STATUS_FIELD, 
status)\n submit_time = time.ctime()\n R.hset(get_status_key(uid, pid), SUBMIT_TIME_FIELD, submit_time\n )\n self.have_uid_pid.add('%d_%d' % (uid, pid))\n elif '%d_%d' % (uid, pid) in self.have_uid_pid:\n continue\n else:\n status = R.hget(get_status_key(uid, pid), STATUS_FIELD)\n submit_time = R.hget(get_status_key(uid, pid),\n SUBMIT_TIME_FIELD)\n self.have_uid_pid.add('%d_%d' % (uid, pid))\n if status == STATUS_FINISHED:\n self.var_finish.set(int(self.var_finish.get()) + 1)\n pos = 'end'\n else:\n self.var_wait.set(int(self.var_wait.get()) + 1)\n pos = lower_bound(self.unfinished_runid, runid)\n self.unfinished_runid.insert(pos, runid)\n node = self.tree.insert('', str(pos), values=(runid, get_name(\n uid), get_problem_color(pid), submit_time, status))\n self.runid_to_node[runid] = node\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(prog='NENU-OJ Ballon')\n parser.add_argument('--namefile', dest='namefile', required=True, type=\n str, help='such as namefile.json')\n parser.add_argument('--problemfile', dest='problemfile', required=True,\n type=str, help='such as problemfile.json')\n parser.add_argument('--redishost', dest='redishost', required=True,\n type=str, help='such as 127.0.0.1')\n parser.add_argument('--redisport', dest='redisport', required=True,\n type=int, help='such as 6379')\n parser.add_argument('--contestid', dest='contestid', required=True,\n type=int, help='such as 9')\n args = parser.parse_args()\n R = redis.Redis(host=args.redishost, port=args.redisport)\n CONTEST_ID = args.contestid\n with open(args.namefile) as f:\n NAME = json.loads(f.read())\n with open(args.problemfile) as f:\n PROBLEM_NAME = json.loads(f.read())\n QUEUE_NAME = 'ballon_%d' % CONTEST_ID\n BACKUP_QUEUE_NAME = 'ballon_bak_%d' % CONTEST_ID\n PrinterTkinter()\n",
"step-5": "# -*- coding: utf-8 -*-\n\nimport argparse\nimport redis\nfrom Tkinter import *\nimport ttk\nimport json\nimport time\nimport thread\n\nR = None\nNAME = {}\nPROBLEM_NAME = {}\nCONTEST_ID = None\n\nQUEUE_NAME = None\nBACKUP_QUEUE_NAME = None\nRUNID_FIELD = \"runid\"\nSUBMIT_TIME_FIELD = \"submit_time\"\nSTATUS_FIELD = \"status\"\nSTATUS_FINISHED = \"finished\"\nSTATUS_WAIT = \"wait\"\n\n\ndef lower_bound(arr, key):\n left = 0\n right = len(arr) - 1\n res = len(arr)\n while left <= right:\n mid = (left + right) >> 1\n if arr[mid] >= key:\n res = mid\n right = mid - 1\n else:\n left = mid + 1\n return res\n\n\ndef get_status_key(user_id, pid):\n return \"status_%d_%d\" % (user_id, pid)\n\n\ndef get_name(user_id):\n user_id = str(user_id)\n if user_id in NAME:\n return NAME[user_id]\n else:\n return \"user: %s\" % user_id\n\n\ndef get_problem_color(pid):\n pid = str(pid)\n if pid in PROBLEM_NAME:\n return PROBLEM_NAME[pid]\n else:\n return str(pid)\n\n\nclass PrinterTkinter:\n def __init__(self):\n self.root = Tk()\n self.root.title(\"气球发放\")\n\n self.runid_to_node = dict()\n self.runid_to_uid = dict()\n self.runid_to_pid = dict()\n self.have_uid_pid = set()\n self.unfinished_runid = []\n\n self.frame_left_top = Frame(width=400, height=200)\n self.frame_right_top = Frame(width=400, height=200)\n self.frame_center = Frame(width=800, height=400)\n self.frame_bottom = Frame(width=800, height=50)\n\n # 定义左上方区域\n self.left_top_title = Label(self.frame_left_top, text=\"发放状态:\", font=('Arial', 25))\n self.left_top_title.grid(row=0, column=0, columnspan=2, sticky=NSEW, padx=50, pady=30)\n\n self.var_finish = StringVar()\n self.var_wait = StringVar()\n\n self.left_top_frame = Frame(self.frame_left_top)\n self.left_top_frame_left1 = Label(self.frame_left_top, text=\"已发放\", font=('Arial', 20))\n self.left_top_frame_left2 = Label(self.frame_left_top, textvariable=self.var_finish, font=('Arial', 15))\n self.var_finish.set(0)\n self.left_top_frame_right1 = Label(self.frame_left_top, text=\"未发放\", font=('Arial', 20))\n self.left_top_frame_right2 = Label(self.frame_left_top, textvariable=self.var_wait, font=('Arial', 15))\n self.var_wait.set(0)\n self.left_top_frame_left1.grid(row=1, column=0)\n self.left_top_frame_left2.grid(row=1, column=1)\n self.left_top_frame_right1.grid(row=2, column=0)\n self.left_top_frame_right2.grid(row=2, column=1)\n\n # 定义右上方区域\n self.var_entry = StringVar()\n\n self.right_top_title = Label(self.frame_right_top, text=\"切换状态(输入runid):\", font=('Arial', 20))\n self.right_top_entry = Entry(self.frame_right_top, textvariable=self.var_entry)\n\n self.number = int\n self.right_top_button = Button(self.frame_right_top, text=\"确定\", command=self.button_switch, font=('Arial', 15))\n self.right_top_title.grid(row=0, column=0)\n self.right_top_entry.grid(row=1, column=0)\n self.right_top_button.grid(row=2, column=0, padx=20, pady=20)\n\n\n # 定义中心列表区域\n self.tree = ttk.Treeview(self.frame_center, show=\"headings\", height=18, columns=(\"a\", \"b\", \"c\", \"d\", \"e\"))\n self.vbar = ttk.Scrollbar(self.frame_center, orient=VERTICAL, command=self.tree.yview)\n # 定义树形结构与滚动条\n self.tree.configure(yscrollcommand=self.vbar.set)\n\n # 表格的标题\n self.tree.column(\"a\", width=50, anchor=\"center\")\n self.tree.column(\"b\", width=150, anchor=\"center\")\n self.tree.column(\"c\", width=150, anchor=\"center\")\n self.tree.column(\"d\", width=200, anchor=\"center\")\n self.tree.column(\"e\", width=150, anchor=\"center\")\n self.tree.heading(\"a\", text=\"Runid\")\n self.tree.heading(\"b\", 
text=\"User\")\n self.tree.heading(\"c\", text=\"Problem\")\n self.tree.heading(\"d\", text=\"Time\")\n self.tree.heading(\"e\", text=\"Status\")\n\n # 调用方法获取表格内容插入\n self.get_tree()\n self.tree.grid(row=0, column=0, sticky=NSEW)\n self.vbar.grid(row=0, column=1, sticky=NS)\n\n # 整体区域定位\n self.frame_left_top.grid(row=0, column=0, padx=2, pady=5)\n self.frame_right_top.grid(row=0, column=1, padx=30, pady=30)\n self.frame_center.grid(row=1, column=0, columnspan=2, padx=4, pady=5)\n self.frame_bottom.grid(row=2, column=0, columnspan=2)\n\n self.frame_left_top.grid_propagate(0)\n self.frame_right_top.grid_propagate(0)\n self.frame_center.grid_propagate(0)\n self.frame_bottom.grid_propagate(0)\n\n thread.start_new_thread(self.listen, ())\n self.root.mainloop()\n\n # 表格内容插入\n def get_tree(self):\n bak_list = R.lrange(BACKUP_QUEUE_NAME, 0, -1)\n for bak in bak_list:\n bak = bak.split('_')\n uid = int(bak[0])\n pid = int(bak[1])\n runid = int(bak[2])\n self.runid_to_uid[runid] = uid\n self.runid_to_pid[runid] = pid\n if R.hget(get_status_key(uid, pid), RUNID_FIELD) == None:\n R.hset(get_status_key(uid, pid), RUNID_FIELD, runid)\n status = STATUS_WAIT\n R.hset(get_status_key(uid, pid), STATUS_FIELD, status)\n submit_time = time.ctime()\n R.hset(get_status_key(uid, pid), SUBMIT_TIME_FIELD, submit_time)\n self.have_uid_pid.add(\"%d_%d\" % (uid, pid))\n elif \"%d_%d\" % (uid, pid) in self.have_uid_pid:\n continue\n else:\n status = R.hget(get_status_key(uid, pid), STATUS_FIELD)\n submit_time = R.hget(get_status_key(uid, pid), SUBMIT_TIME_FIELD)\n self.have_uid_pid.add(\"%d_%d\" % (uid, pid))\n\n if status == STATUS_FINISHED:\n self.var_finish.set(int(self.var_finish.get()) + 1)\n pos = \"end\"\n else:\n self.var_wait.set(int(self.var_wait.get()) + 1)\n pos = lower_bound(self.unfinished_runid, runid)\n self.unfinished_runid.insert(pos, runid)\n\n node = self.tree.insert(\"\", str(pos), values=(runid, get_name(uid), get_problem_color(pid), submit_time, status))\n self.runid_to_node[runid] = node\n\n def button_switch(self):\n self.number = self.right_top_entry.get()\n runid = int(self.right_top_entry.get())\n if not (runid in self.runid_to_node):\n return\n self.tree.delete(self.runid_to_node[runid])\n uid = self.runid_to_uid[runid]\n pid = self.runid_to_pid[runid]\n status_before = R.hget(get_status_key(uid, pid), STATUS_FIELD)\n submit_time = R.hget(get_status_key(uid, pid), SUBMIT_TIME_FIELD)\n if status_before == STATUS_WAIT:\n status = STATUS_FINISHED\n R.hset(get_status_key(uid, pid), STATUS_FIELD, STATUS_FINISHED)\n else:\n status = STATUS_WAIT\n R.hset(get_status_key(uid, pid), STATUS_FIELD, STATUS_WAIT)\n\n if status == STATUS_FINISHED:\n pos = lower_bound(self.unfinished_runid, runid)\n self.unfinished_runid.pop(pos)\n pos = \"end\"\n else:\n pos = lower_bound(self.unfinished_runid, runid)\n self.unfinished_runid.insert(pos, runid)\n node = self.tree.insert(\"\", str(pos), values=(runid, get_name(uid), get_problem_color(pid), submit_time, status))\n\n if status == STATUS_WAIT:\n self.var_wait.set(int(self.var_wait.get()) + 1)\n self.var_finish.set(int(self.var_finish.get()) - 1)\n else:\n self.var_wait.set(int(self.var_wait.get()) - 1)\n self.var_finish.set(int(self.var_finish.get()) + 1)\n R.hset(get_status_key(uid, pid), STATUS_FIELD, status)\n self.runid_to_node[runid] = node\n\n def listen(self):\n while True:\n msg = R.blpop(QUEUE_NAME, 0)[1]\n R.rpush(BACKUP_QUEUE_NAME, msg)\n bak = msg.split('_')\n uid = int(bak[0])\n pid = int(bak[1])\n runid = int(bak[2])\n self.runid_to_uid[runid] = 
uid\n self.runid_to_pid[runid] = pid\n if R.hget(get_status_key(uid, pid), RUNID_FIELD) == None:\n R.hset(get_status_key(uid, pid), RUNID_FIELD, runid)\n status = STATUS_WAIT\n R.hset(get_status_key(uid, pid), STATUS_FIELD, status)\n submit_time = time.ctime()\n R.hset(get_status_key(uid, pid), SUBMIT_TIME_FIELD, submit_time)\n self.have_uid_pid.add(\"%d_%d\" % (uid, pid))\n elif \"%d_%d\" % (uid, pid) in self.have_uid_pid:\n continue\n else:\n status = R.hget(get_status_key(uid, pid), STATUS_FIELD)\n submit_time = R.hget(get_status_key(uid, pid), SUBMIT_TIME_FIELD)\n self.have_uid_pid.add(\"%d_%d\" % (uid, pid))\n\n if status == STATUS_FINISHED:\n self.var_finish.set(int(self.var_finish.get()) + 1)\n pos = \"end\"\n else:\n self.var_wait.set(int(self.var_wait.get()) + 1)\n pos = lower_bound(self.unfinished_runid, runid)\n self.unfinished_runid.insert(pos, runid)\n\n node = self.tree.insert(\"\", str(pos),\n values=(runid, get_name(uid), get_problem_color(pid), submit_time, status))\n self.runid_to_node[runid] = node\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(prog='NENU-OJ Ballon')\n parser.add_argument('--namefile', dest='namefile', required=True, type=str, help='such as namefile.json')\n parser.add_argument('--problemfile', dest='problemfile', required=True, type=str, help='such as problemfile.json')\n parser.add_argument('--redishost', dest='redishost', required=True, type=str, help='such as 127.0.0.1')\n parser.add_argument('--redisport', dest='redisport', required=True, type=int, help='such as 6379')\n parser.add_argument('--contestid', dest='contestid', required=True, type=int, help='such as 9')\n args = parser.parse_args()\n\n R = redis.Redis(host=args.redishost, port=args.redisport)\n CONTEST_ID = args.contestid\n with open(args.namefile) as f:\n NAME = json.loads(f.read())\n with open(args.problemfile) as f:\n PROBLEM_NAME = json.loads(f.read())\n\n QUEUE_NAME = \"ballon_%d\" % CONTEST_ID\n BACKUP_QUEUE_NAME = \"ballon_bak_%d\" % CONTEST_ID\n\n PrinterTkinter()\n",
"step-ids": [
5,
8,
10,
12,
13
]
}
|
[
5,
8,
10,
12,
13
] |
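# Illustrative producer-side sketch (an assumption, not part of the dataset
# row above): the balloon GUI's listen() thread blocks on BLPOP, so a judge
# process only needs to RPUSH "uid_pid_runid" strings onto the same
# per-contest queue. Host, port and ids below are made-up example values.
import redis

r = redis.Redis(host='127.0.0.1', port=6379)
contest_id = 9
queue_name = 'ballon_%d' % contest_id  # matches QUEUE_NAME in the row above


def announce_accepted(uid, pid, runid):
    # Message format matches the consumer's msg.split('_') parsing.
    r.rpush(queue_name, '%d_%d_%d' % (uid, pid, runid))


announce_accepted(uid=1001, pid=3, runid=42)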
<|reserved_special_token_0|>
class NET(nn.Module):
<|reserved_special_token_0|>
def uzunluk(self, x):
x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))
x = F.max_pool2d(F.relu(self.conv3(x)), (2, 2))
if self.boyut is None:
self.boyut = x[0].shape[0] * x[0].shape[1] * x[0].shape[2]
return x
def forward(self, x):
x = self.uzunluk(x)
x = x.view(-1, self.boyut)
x = F.relu(self.fkl1(x))
x = F.softmax(self.fkl2(x))
return x
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class NET(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 64, 5)
self.conv2 = nn.Conv2d(64, 128, 5)
self.conv3 = nn.Conv2d(128, 64, 5)
x = torch.randn(86, 86).view(-1, 1, 86, 86)
self.boyut = None
self.uzunluk(x)
self.fkl1 = nn.Linear(self.boyut, 512)
self.fkl2 = nn.Linear(512, 3)
def uzunluk(self, x):
x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))
x = F.max_pool2d(F.relu(self.conv3(x)), (2, 2))
if self.boyut is None:
self.boyut = x[0].shape[0] * x[0].shape[1] * x[0].shape[2]
return x
def forward(self, x):
x = self.uzunluk(x)
x = x.view(-1, self.boyut)
x = F.relu(self.fkl1(x))
x = F.softmax(self.fkl2(x))
return x
<|reserved_special_token_1|>
<|reserved_special_token_0|>
device = torch.device(0)
class NET(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 64, 5)
self.conv2 = nn.Conv2d(64, 128, 5)
self.conv3 = nn.Conv2d(128, 64, 5)
x = torch.randn(86, 86).view(-1, 1, 86, 86)
self.boyut = None
self.uzunluk(x)
self.fkl1 = nn.Linear(self.boyut, 512)
self.fkl2 = nn.Linear(512, 3)
def uzunluk(self, x):
x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))
x = F.max_pool2d(F.relu(self.conv3(x)), (2, 2))
if self.boyut is None:
self.boyut = x[0].shape[0] * x[0].shape[1] * x[0].shape[2]
return x
def forward(self, x):
x = self.uzunluk(x)
x = x.view(-1, self.boyut)
x = F.relu(self.fkl1(x))
x = F.softmax(self.fkl2(x))
return x
<|reserved_special_token_1|>
import torch, cv2, os, time
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
device = torch.device(0)
class NET(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 64, 5)
self.conv2 = nn.Conv2d(64, 128, 5)
self.conv3 = nn.Conv2d(128, 64, 5)
x = torch.randn(86, 86).view(-1, 1, 86, 86)
self.boyut = None
self.uzunluk(x)
self.fkl1 = nn.Linear(self.boyut, 512)
self.fkl2 = nn.Linear(512, 3)
def uzunluk(self, x):
x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))
x = F.max_pool2d(F.relu(self.conv3(x)), (2, 2))
if self.boyut is None:
self.boyut = x[0].shape[0] * x[0].shape[1] * x[0].shape[2]
return x
def forward(self, x):
x = self.uzunluk(x)
x = x.view(-1, self.boyut)
x = F.relu(self.fkl1(x))
x = F.softmax(self.fkl2(x))
return x
<|reserved_special_token_1|>
import torch,cv2,os,time
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# GPU usage (select the first CUDA device)
device=torch.device(0)
class NET(nn.Module):
def __init__(self):
super(). __init__()
self.conv1=nn.Conv2d(1,64,5)
self.conv2=nn.Conv2d(64,128,5)
self.conv3=nn.Conv2d(128,64,5)
x=torch.randn(86,86).view(-1,1,86,86)
self.boyut=None
self.uzunluk(x)
self.fkl1=nn.Linear(self.boyut,512)
self.fkl2=nn.Linear(512,3)
def uzunluk(self,x):
x=F.max_pool2d(F.relu(self.conv1(x)),(2,2))
x=F.max_pool2d(F.relu(self.conv2(x)),(2,2))
x=F.max_pool2d(F.relu(self.conv3(x)),(2,2))
if self.boyut is None:
self.boyut=x[0].shape[0]*x[0].shape[1]*x[0].shape[2]
return x
def forward(self,x):
x=self.uzunluk(x)
x=x.view(-1,self.boyut)
x=F.relu(self.fkl1(x))
x=F.softmax(self.fkl2(x))
return x
|
flexible
|
{
"blob_id": "ad63beedc460b3d64a51d0b1f81f8e44cb559749",
"index": 1655,
"step-1": "<mask token>\n\n\nclass NET(nn.Module):\n <mask token>\n\n def uzunluk(self, x):\n x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))\n x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))\n x = F.max_pool2d(F.relu(self.conv3(x)), (2, 2))\n if self.boyut is None:\n self.boyut = x[0].shape[0] * x[0].shape[1] * x[0].shape[2]\n return x\n\n def forward(self, x):\n x = self.uzunluk(x)\n x = x.view(-1, self.boyut)\n x = F.relu(self.fkl1(x))\n x = F.softmax(self.fkl2(x))\n return x\n",
"step-2": "<mask token>\n\n\nclass NET(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 64, 5)\n self.conv2 = nn.Conv2d(64, 128, 5)\n self.conv3 = nn.Conv2d(128, 64, 5)\n x = torch.randn(86, 86).view(-1, 1, 86, 86)\n self.boyut = None\n self.uzunluk(x)\n self.fkl1 = nn.Linear(self.boyut, 512)\n self.fkl2 = nn.Linear(512, 3)\n\n def uzunluk(self, x):\n x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))\n x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))\n x = F.max_pool2d(F.relu(self.conv3(x)), (2, 2))\n if self.boyut is None:\n self.boyut = x[0].shape[0] * x[0].shape[1] * x[0].shape[2]\n return x\n\n def forward(self, x):\n x = self.uzunluk(x)\n x = x.view(-1, self.boyut)\n x = F.relu(self.fkl1(x))\n x = F.softmax(self.fkl2(x))\n return x\n",
"step-3": "<mask token>\ndevice = torch.device(0)\n\n\nclass NET(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 64, 5)\n self.conv2 = nn.Conv2d(64, 128, 5)\n self.conv3 = nn.Conv2d(128, 64, 5)\n x = torch.randn(86, 86).view(-1, 1, 86, 86)\n self.boyut = None\n self.uzunluk(x)\n self.fkl1 = nn.Linear(self.boyut, 512)\n self.fkl2 = nn.Linear(512, 3)\n\n def uzunluk(self, x):\n x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))\n x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))\n x = F.max_pool2d(F.relu(self.conv3(x)), (2, 2))\n if self.boyut is None:\n self.boyut = x[0].shape[0] * x[0].shape[1] * x[0].shape[2]\n return x\n\n def forward(self, x):\n x = self.uzunluk(x)\n x = x.view(-1, self.boyut)\n x = F.relu(self.fkl1(x))\n x = F.softmax(self.fkl2(x))\n return x\n",
"step-4": "import torch, cv2, os, time\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\ndevice = torch.device(0)\n\n\nclass NET(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 64, 5)\n self.conv2 = nn.Conv2d(64, 128, 5)\n self.conv3 = nn.Conv2d(128, 64, 5)\n x = torch.randn(86, 86).view(-1, 1, 86, 86)\n self.boyut = None\n self.uzunluk(x)\n self.fkl1 = nn.Linear(self.boyut, 512)\n self.fkl2 = nn.Linear(512, 3)\n\n def uzunluk(self, x):\n x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))\n x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))\n x = F.max_pool2d(F.relu(self.conv3(x)), (2, 2))\n if self.boyut is None:\n self.boyut = x[0].shape[0] * x[0].shape[1] * x[0].shape[2]\n return x\n\n def forward(self, x):\n x = self.uzunluk(x)\n x = x.view(-1, self.boyut)\n x = F.relu(self.fkl1(x))\n x = F.softmax(self.fkl2(x))\n return x\n",
"step-5": "import torch,cv2,os,time\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom tqdm import tqdm\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torch.optim as optim\r\n\r\n\r\n# GPU kullanımı\r\ndevice=torch.device(0)\r\n\r\n\r\nclass NET(nn.Module):\r\n def __init__(self):\r\n super(). __init__()\r\n self.conv1=nn.Conv2d(1,64,5)\r\n self.conv2=nn.Conv2d(64,128,5)\r\n self.conv3=nn.Conv2d(128,64,5)\r\n \r\n x=torch.randn(86,86).view(-1,1,86,86)\r\n \r\n self.boyut=None\r\n self.uzunluk(x)\r\n \r\n self.fkl1=nn.Linear(self.boyut,512)\r\n self.fkl2=nn.Linear(512,3)\r\n def uzunluk(self,x):\r\n \r\n x=F.max_pool2d(F.relu(self.conv1(x)),(2,2))\r\n x=F.max_pool2d(F.relu(self.conv2(x)),(2,2))\r\n x=F.max_pool2d(F.relu(self.conv3(x)),(2,2))\r\n \r\n if self.boyut is None:\r\n self.boyut=x[0].shape[0]*x[0].shape[1]*x[0].shape[2]\r\n return x\r\n def forward(self,x):\r\n x=self.uzunluk(x)\r\n x=x.view(-1,self.boyut)\r\n \r\n x=F.relu(self.fkl1(x))\r\n x=F.softmax(self.fkl2(x))\r\n \r\n return x\r\n\r\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
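# Illustrative sketch (an assumption, not part of the dataset row above): the
# NET class above sizes its first fully connected layer by pushing a dummy
# tensor through the conv stack once. The same "probe with a dummy input"
# pattern, written as a standalone runnable example:
import torch
import torch.nn as nn
import torch.nn.functional as F


class TinyCNN(nn.Module):

    def __init__(self, in_side=86, n_classes=3):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 64, 5)
        self.conv2 = nn.Conv2d(64, 128, 5)
        self.conv3 = nn.Conv2d(128, 64, 5)
        # Probe the conv stack once to learn the flattened feature size
        # instead of deriving it by hand from kernel/pool arithmetic.
        with torch.no_grad():
            flat = self._features(torch.randn(1, 1, in_side, in_side)).numel()
        self.fc1 = nn.Linear(flat, 512)
        self.fc2 = nn.Linear(512, n_classes)

    def _features(self, x):
        x = F.max_pool2d(F.relu(self.conv1(x)), 2)
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        x = F.max_pool2d(F.relu(self.conv3(x)), 2)
        return x

    def forward(self, x):
        x = self._features(x)
        x = torch.flatten(x, start_dim=1)
        x = F.relu(self.fc1(x))
        return F.softmax(self.fc2(x), dim=1)


if __name__ == '__main__':
    model = TinyCNN()
    out = model(torch.randn(4, 1, 86, 86))
    print(out.shape)  # torch.Size([4, 3])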
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
dependencies = [
('products', '0007_auto_20150904_1320'),
]
operations = [
migrations.AddField(
model_name='customer',
name='in_close',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='customer',
name='time_close',
field=models.DateTimeField(default=datetime.datetime(2015, 11, 26, 23, 25, 34, 205639)),
),
migrations.AddField(
model_name='historicalcustomer',
name='in_close',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='historicalcustomer',
name='time_close',
field=models.DateTimeField(default=datetime.datetime(2015, 11, 26, 23, 25, 34, 205639)),
),
]
|
normal
|
{
"blob_id": "fd52379d125d6215fe12b6e01aa568949511549d",
"index": 6964,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('products', '0007_auto_20150904_1320')]\n operations = [migrations.AddField(model_name='customer', name=\n 'in_close', field=models.BooleanField(default=False)), migrations.\n AddField(model_name='customer', name='time_close', field=models.\n DateTimeField(default=datetime.datetime(2015, 11, 26, 23, 25, 34, \n 205639))), migrations.AddField(model_name='historicalcustomer',\n name='in_close', field=models.BooleanField(default=False)),\n migrations.AddField(model_name='historicalcustomer', name=\n 'time_close', field=models.DateTimeField(default=datetime.datetime(\n 2015, 11, 26, 23, 25, 34, 205639)))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import models, migrations\nimport datetime\n\n\nclass Migration(migrations.Migration):\n dependencies = [('products', '0007_auto_20150904_1320')]\n operations = [migrations.AddField(model_name='customer', name=\n 'in_close', field=models.BooleanField(default=False)), migrations.\n AddField(model_name='customer', name='time_close', field=models.\n DateTimeField(default=datetime.datetime(2015, 11, 26, 23, 25, 34, \n 205639))), migrations.AddField(model_name='historicalcustomer',\n name='in_close', field=models.BooleanField(default=False)),\n migrations.AddField(model_name='historicalcustomer', name=\n 'time_close', field=models.DateTimeField(default=datetime.datetime(\n 2015, 11, 26, 23, 25, 34, 205639)))]\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('products', '0007_auto_20150904_1320'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='customer',\n name='in_close',\n field=models.BooleanField(default=False),\n ),\n migrations.AddField(\n model_name='customer',\n name='time_close',\n field=models.DateTimeField(default=datetime.datetime(2015, 11, 26, 23, 25, 34, 205639)),\n ),\n migrations.AddField(\n model_name='historicalcustomer',\n name='in_close',\n field=models.BooleanField(default=False),\n ),\n migrations.AddField(\n model_name='historicalcustomer',\n name='time_close',\n field=models.DateTimeField(default=datetime.datetime(2015, 11, 26, 23, 25, 34, 205639)),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
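# Illustrative sketch (an assumption, not from the dataset row above): the
# migration pins time_close to a fixed datetime captured when makemigrations
# ran. In the model definition itself, the usual Django idiom is a callable
# default such as timezone.now, evaluated per save. This snippet belongs in
# an app's models.py; the Customer fields mirror the migration above.
from django.db import models
from django.utils import timezone


class Customer(models.Model):
    in_close = models.BooleanField(default=False)
    time_close = models.DateTimeField(default=timezone.now)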
# import visual_servoing_utils_main as utils
from autolab_core import rigid_transformations as rt
from yumipy import YuMiState
class YumiConstants:
T_gripper_gripperV = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0, -1]],
from_frame='gripper', to_frame='obj')
T_rightH_yumi_1 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0, 1, 0]],
translation=[0.6256, -0.15060002, 0.3616],
from_frame='home', to_frame='yumi')
T_rightH_yumi_2 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0, 1, 0]],
translation=[0.6256 - 0.1, -0.15060002 + 0.1, 0.3616],
from_frame='home', to_frame='yumi')
T_rightH_yumi_3 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0, 1, 0]],
translation=[0.6256 - 0.1, -0.15060002 + 0.1, 0.3616 - 0.05],
from_frame='home', to_frame='yumi')
T_leftH_yumi_1 = rt.RigidTransform(rotation=[[1, 0, 0], [0, 0, -1], [0, 1, 0]],
translation=[0.52070004, 0.07340001, 0.3574],
from_frame='home', to_frame='yumi')
T_leftH_yumi_2 = rt.RigidTransform(rotation=[[1, 0, 0], [0, 0, -1], [0, 1, 0]],
translation=[0.67080003 - 0.15, -0.12650001 + 0.2, 0.35720003],
from_frame='home', to_frame='yumi')
T_board_yumi = rt.RigidTransform(translation=[0.3984, 0, 0.0837],
from_frame='board', to_frame='yumi')
board_center = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0, -1]],
translation=[0.42971, -0.004, -0.057],
from_frame='yumi', to_frame='world')
T_rightH_yumi = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0, -1]],
translation=[0.3984, 0 - 8 * 0.0375, 0.0837],
from_frame='home', to_frame='yumi')
T_leftH_yumi = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0, -1]],
translation=[0.3984, 0 + 8 * 0.0375, 0.0837],
# translation=[0.3984, 0 + 8*0.0375, 0.0837],
from_frame='home', to_frame='yumi')
right_threading_home = YuMiState([101.34, -83.3, 54.01, -44.34, -82.32, -26.22, -76.76])
left_threading_home = YuMiState([-74.73, -70.63, 9.62, 15.86, 65.74, -169.18, 50.61])
right_pickup_home = YuMiState([80.92, -118.47, 39.2, -139.35, 107.91, 4.83, -26.93])
left_pickup_home = YuMiState([-75.32, -114.45, 37.59, 134.52, 102.66, -8.73, 42.77])
|
normal
|
{
"blob_id": "34c81b9318d978305748d413c869a86ee6709e2c",
"index": 996,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass YumiConstants:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass YumiConstants:\n T_gripper_gripperV = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0],\n [0, 0, -1]], from_frame='gripper', to_frame='obj')\n T_rightH_yumi_1 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0,\n 1, 0]], translation=[0.6256, -0.15060002, 0.3616], from_frame=\n 'home', to_frame='yumi')\n T_rightH_yumi_2 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0,\n 1, 0]], translation=[0.6256 - 0.1, -0.15060002 + 0.1, 0.3616],\n from_frame='home', to_frame='yumi')\n T_rightH_yumi_3 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0,\n 1, 0]], translation=[0.6256 - 0.1, -0.15060002 + 0.1, 0.3616 - 0.05\n ], from_frame='home', to_frame='yumi')\n T_leftH_yumi_1 = rt.RigidTransform(rotation=[[1, 0, 0], [0, 0, -1], [0,\n 1, 0]], translation=[0.52070004, 0.07340001, 0.3574], from_frame=\n 'home', to_frame='yumi')\n T_leftH_yumi_2 = rt.RigidTransform(rotation=[[1, 0, 0], [0, 0, -1], [0,\n 1, 0]], translation=[0.67080003 - 0.15, -0.12650001 + 0.2, \n 0.35720003], from_frame='home', to_frame='yumi')\n T_board_yumi = rt.RigidTransform(translation=[0.3984, 0, 0.0837],\n from_frame='board', to_frame='yumi')\n board_center = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0,\n -1]], translation=[0.42971, -0.004, -0.057], from_frame='yumi',\n to_frame='world')\n T_rightH_yumi = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, \n 0, -1]], translation=[0.3984, 0 - 8 * 0.0375, 0.0837], from_frame=\n 'home', to_frame='yumi')\n T_leftH_yumi = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0,\n -1]], translation=[0.3984, 0 + 8 * 0.0375, 0.0837], from_frame=\n 'home', to_frame='yumi')\n right_threading_home = YuMiState([101.34, -83.3, 54.01, -44.34, -82.32,\n -26.22, -76.76])\n left_threading_home = YuMiState([-74.73, -70.63, 9.62, 15.86, 65.74, -\n 169.18, 50.61])\n right_pickup_home = YuMiState([80.92, -118.47, 39.2, -139.35, 107.91, \n 4.83, -26.93])\n left_pickup_home = YuMiState([-75.32, -114.45, 37.59, 134.52, 102.66, -\n 8.73, 42.77])\n",
"step-4": "from autolab_core import rigid_transformations as rt\nfrom yumipy import YuMiState\n\n\nclass YumiConstants:\n T_gripper_gripperV = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0],\n [0, 0, -1]], from_frame='gripper', to_frame='obj')\n T_rightH_yumi_1 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0,\n 1, 0]], translation=[0.6256, -0.15060002, 0.3616], from_frame=\n 'home', to_frame='yumi')\n T_rightH_yumi_2 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0,\n 1, 0]], translation=[0.6256 - 0.1, -0.15060002 + 0.1, 0.3616],\n from_frame='home', to_frame='yumi')\n T_rightH_yumi_3 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0,\n 1, 0]], translation=[0.6256 - 0.1, -0.15060002 + 0.1, 0.3616 - 0.05\n ], from_frame='home', to_frame='yumi')\n T_leftH_yumi_1 = rt.RigidTransform(rotation=[[1, 0, 0], [0, 0, -1], [0,\n 1, 0]], translation=[0.52070004, 0.07340001, 0.3574], from_frame=\n 'home', to_frame='yumi')\n T_leftH_yumi_2 = rt.RigidTransform(rotation=[[1, 0, 0], [0, 0, -1], [0,\n 1, 0]], translation=[0.67080003 - 0.15, -0.12650001 + 0.2, \n 0.35720003], from_frame='home', to_frame='yumi')\n T_board_yumi = rt.RigidTransform(translation=[0.3984, 0, 0.0837],\n from_frame='board', to_frame='yumi')\n board_center = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0,\n -1]], translation=[0.42971, -0.004, -0.057], from_frame='yumi',\n to_frame='world')\n T_rightH_yumi = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, \n 0, -1]], translation=[0.3984, 0 - 8 * 0.0375, 0.0837], from_frame=\n 'home', to_frame='yumi')\n T_leftH_yumi = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0,\n -1]], translation=[0.3984, 0 + 8 * 0.0375, 0.0837], from_frame=\n 'home', to_frame='yumi')\n right_threading_home = YuMiState([101.34, -83.3, 54.01, -44.34, -82.32,\n -26.22, -76.76])\n left_threading_home = YuMiState([-74.73, -70.63, 9.62, 15.86, 65.74, -\n 169.18, 50.61])\n right_pickup_home = YuMiState([80.92, -118.47, 39.2, -139.35, 107.91, \n 4.83, -26.93])\n left_pickup_home = YuMiState([-75.32, -114.45, 37.59, 134.52, 102.66, -\n 8.73, 42.77])\n",
"step-5": "# import visual_servoing_utils_main as utils\nfrom autolab_core import rigid_transformations as rt\nfrom yumipy import YuMiState\n\nclass YumiConstants:\n\n T_gripper_gripperV = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0, -1]],\n from_frame='gripper', to_frame='obj')\n\n T_rightH_yumi_1 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0, 1, 0]],\n translation=[0.6256, -0.15060002, 0.3616],\n from_frame='home', to_frame='yumi')\n\n T_rightH_yumi_2 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0, 1, 0]],\n translation=[0.6256 - 0.1, -0.15060002 + 0.1, 0.3616],\n from_frame='home', to_frame='yumi')\n\n T_rightH_yumi_3 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0, 1, 0]],\n translation=[0.6256 - 0.1, -0.15060002 + 0.1, 0.3616 - 0.05],\n from_frame='home', to_frame='yumi')\n\n T_leftH_yumi_1 = rt.RigidTransform(rotation=[[1, 0, 0], [0, 0, -1], [0, 1, 0]],\n translation=[0.52070004, 0.07340001, 0.3574],\n from_frame='home', to_frame='yumi')\n\n T_leftH_yumi_2 = rt.RigidTransform(rotation=[[1, 0, 0], [0, 0, -1], [0, 1, 0]],\n translation=[0.67080003 - 0.15, -0.12650001 + 0.2, 0.35720003],\n from_frame='home', to_frame='yumi')\n\n T_board_yumi = rt.RigidTransform(translation=[0.3984, 0, 0.0837],\n from_frame='board', to_frame='yumi')\n\n\n board_center = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0, -1]],\n translation=[0.42971, -0.004, -0.057],\n from_frame='yumi', to_frame='world')\n\n T_rightH_yumi = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0, -1]],\n translation=[0.3984, 0 - 8 * 0.0375, 0.0837],\n from_frame='home', to_frame='yumi')\n\n T_leftH_yumi = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0, -1]],\n translation=[0.3984, 0 + 8 * 0.0375, 0.0837],\n # translation=[0.3984, 0 + 8*0.0375, 0.0837],\n from_frame='home', to_frame='yumi')\n\n right_threading_home = YuMiState([101.34, -83.3, 54.01, -44.34, -82.32, -26.22, -76.76])\n left_threading_home = YuMiState([-74.73, -70.63, 9.62, 15.86, 65.74, -169.18, 50.61])\n\n right_pickup_home = YuMiState([80.92, -118.47, 39.2, -139.35, 107.91, 4.83, -26.93])\n left_pickup_home = YuMiState([-75.32, -114.45, 37.59, 134.52, 102.66, -8.73, 42.77])\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
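# Illustrative sketch (an assumption, not part of the dataset rows): each
# RigidTransform above is a rotation matrix plus a translation vector, so
# composition and point mapping can be checked with plain 4x4 homogeneous
# matrices in numpy. Values reuse T_board_yumi from the row above.
import numpy as np


def to_homogeneous(rotation, translation):
    T = np.eye(4)
    T[:3, :3] = np.asarray(rotation, dtype=float)
    T[:3, 3] = np.asarray(translation, dtype=float)
    return T


T_board_yumi = to_homogeneous(np.eye(3), [0.3984, 0.0, 0.0837])
p_board = np.array([0.1, 0.0, 0.0, 1.0])  # a point in the board frame
p_yumi = T_board_yumi @ p_board           # same point in the yumi frame
print(p_yumi[:3])  # [0.4984 0.     0.0837]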
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for h in range(11, 41):
for i in range(model_img_array.shape[0]):
for j in range(model_img_array.shape[2]):
dis = np.sqrt(pow(13 - i, 2) + pow(9 - j, 2))
if dis <= 2:
model_img_array[i, h, j] = yu
elif np.sqrt(pow(11 - i, 2) + pow(14 - j, 2)) <= 2:
model_img_array[i, h, j] = yu
elif np.sqrt(pow(9 - i, 2) + pow(19 - j, 2)) <= 2:
model_img_array[i, h, j] = yu
elif np.sqrt(pow(8 - i, 2) + pow(10 - j, 2)) <= 2:
model_img_array[i, h, j] = yu
elif np.sqrt(pow(5 - i, 2) + pow(15 - j, 2)) <= 2:
model_img_array[i, h, j] = yu
elif np.sqrt(pow(2 - i, 2) + pow(10 - j, 2)) <= 2:
model_img_array[i, h, j] = yu
<|reserved_special_token_0|>
model_img.SetSpacing(spacing)
sitk.WriteImage(model_img, '../../data/ground_data/model_img.nii')
<|reserved_special_token_0|>
print(np.sum(h))
<|reserved_special_token_0|>
sitk.WriteImage(h_img, '../../data/ground_data/h.nii')
<|reserved_special_token_0|>
print(h_pad)
<|reserved_special_token_0|>
recon_img.SetSpacing(spacing)
sitk.WriteImage(recon_img, '../../data/ground_data/recon.nii')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
new_img = sitk.ReadImage('../../data/ground_data/new_img.nii')
spacing = new_img.GetSpacing()
new_img_array = sitk.GetArrayFromImage(new_img)
model_img_array = np.zeros(new_img_array.shape)
yu = 0.1
for h in range(11, 41):
for i in range(model_img_array.shape[0]):
for j in range(model_img_array.shape[2]):
dis = np.sqrt(pow(13 - i, 2) + pow(9 - j, 2))
if dis <= 2:
model_img_array[i, h, j] = yu
elif np.sqrt(pow(11 - i, 2) + pow(14 - j, 2)) <= 2:
model_img_array[i, h, j] = yu
elif np.sqrt(pow(9 - i, 2) + pow(19 - j, 2)) <= 2:
model_img_array[i, h, j] = yu
elif np.sqrt(pow(8 - i, 2) + pow(10 - j, 2)) <= 2:
model_img_array[i, h, j] = yu
elif np.sqrt(pow(5 - i, 2) + pow(15 - j, 2)) <= 2:
model_img_array[i, h, j] = yu
elif np.sqrt(pow(2 - i, 2) + pow(10 - j, 2)) <= 2:
model_img_array[i, h, j] = yu
model_img = sitk.GetImageFromArray(model_img_array)
model_img.SetSpacing(spacing)
sitk.WriteImage(model_img, '../../data/ground_data/model_img.nii')
img_array = sitk.GetArrayFromImage(sitk.ReadImage(
'../../data/ground_data/0.709_0.5_0.75VVHR_0.236_211_211_111_60itr_1sub.nii'
))
model_img_array = sitk.GetArrayFromImage(model_img)
new_img_f = fftn(new_img_array)
model_f = fftn(model_img_array)
H = new_img_f / (model_f + 0.0001)
h = np.fft.ifftn(H)
h = np.real(h)
h = (h - h.min()) / (h.max() - h.min())
h = h / np.sum(h)
print(np.sum(h))
h_img = sitk.GetImageFromArray(h)
sitk.WriteImage(h_img, '../../data/ground_data/h.nii')
h_pad = np.lib.pad(h, ((0, img_array.shape[0] - h.shape[0]), (0, img_array.
shape[1] - h.shape[1]), (0, img_array.shape[2] - h.shape[2])),
'constant', constant_values=0)
print(h_pad)
H_pad = fftn(h_pad)
img_f = fftn(img_array)
recon = img_f / (H_pad + H.any().min() + 0.0001)
recon = ifftn(recon)
recon = np.real(recon)
recon_img = sitk.GetImageFromArray(recon)
recon_img.SetSpacing(spacing)
sitk.WriteImage(recon_img, '../../data/ground_data/recon.nii')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import numpy as np
import SimpleITK as sitk
import skimage.restoration.deconvolution
from numpy.fft import fftn, ifftn
new_img = sitk.ReadImage('../../data/ground_data/new_img.nii')
spacing = new_img.GetSpacing()
new_img_array = sitk.GetArrayFromImage(new_img)
model_img_array = np.zeros(new_img_array.shape)
yu = 0.1
for h in range(11, 41):
for i in range(model_img_array.shape[0]):
for j in range(model_img_array.shape[2]):
dis = np.sqrt(pow(13 - i, 2) + pow(9 - j, 2))
if dis <= 2:
model_img_array[i, h, j] = yu
elif np.sqrt(pow(11 - i, 2) + pow(14 - j, 2)) <= 2:
model_img_array[i, h, j] = yu
elif np.sqrt(pow(9 - i, 2) + pow(19 - j, 2)) <= 2:
model_img_array[i, h, j] = yu
elif np.sqrt(pow(8 - i, 2) + pow(10 - j, 2)) <= 2:
model_img_array[i, h, j] = yu
elif np.sqrt(pow(5 - i, 2) + pow(15 - j, 2)) <= 2:
model_img_array[i, h, j] = yu
elif np.sqrt(pow(2 - i, 2) + pow(10 - j, 2)) <= 2:
model_img_array[i, h, j] = yu
model_img = sitk.GetImageFromArray(model_img_array)
model_img.SetSpacing(spacing)
sitk.WriteImage(model_img, '../../data/ground_data/model_img.nii')
img_array = sitk.GetArrayFromImage(sitk.ReadImage(
'../../data/ground_data/0.709_0.5_0.75VVHR_0.236_211_211_111_60itr_1sub.nii'
))
model_img_array = sitk.GetArrayFromImage(model_img)
new_img_f = fftn(new_img_array)
model_f = fftn(model_img_array)
H = new_img_f / (model_f + 0.0001)
h = np.fft.ifftn(H)
h = np.real(h)
h = (h - h.min()) / (h.max() - h.min())
h = h / np.sum(h)
print(np.sum(h))
h_img = sitk.GetImageFromArray(h)
sitk.WriteImage(h_img, '../../data/ground_data/h.nii')
h_pad = np.lib.pad(h, ((0, img_array.shape[0] - h.shape[0]), (0, img_array.
shape[1] - h.shape[1]), (0, img_array.shape[2] - h.shape[2])),
'constant', constant_values=0)
print(h_pad)
H_pad = fftn(h_pad)
img_f = fftn(img_array)
recon = img_f / (H_pad + H.any().min() + 0.0001)
recon = ifftn(recon)
recon = np.real(recon)
recon_img = sitk.GetImageFromArray(recon)
recon_img.SetSpacing(spacing)
sitk.WriteImage(recon_img, '../../data/ground_data/recon.nii')
<|reserved_special_token_1|>
#!usr/bin/env python
# -*- coding:utf-8 -*-
"""
@File : build_model_2.py
@Author : ljt
@Description: xx
@Time : 2021/6/12 21:46
"""
import numpy as np
import SimpleITK as sitk
import skimage.restoration.deconvolution
from numpy.fft import fftn, ifftn
new_img = sitk.ReadImage("../../data/ground_data/new_img.nii")
spacing = new_img.GetSpacing()
# SimpleITK stores the raw image as (Width, Height, Depth), i.e. (X, Y, Z)
# After calling GetArrayFromImage(), the X and Z axes are swapped,
# so the resulting array has shape (Depth, Height, Width), i.e. (Z, Y, X).
new_img_array = sitk.GetArrayFromImage(new_img)
model_img_array = np.zeros((new_img_array.shape))
# [10, 19, 14]
# [15, 16, 12]
# [20, 25, 10]
# [11, 32, 9]
# [16, 16, 6]
# [11, 19, 3]
# h -> 11-41
yu = 0.1
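# Build the synthetic model image: for every slice h along axis 1, voxels within
# a radius of 2 of each listed (row, column) centre are set to the intensity yu,
# producing thin cylindrical sources through the volume.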
for h in range(11, 41):
for i in range(model_img_array.shape[0]):
for j in range(model_img_array.shape[2]):
dis = np.sqrt(pow((13 - i), 2) + pow((9 - j), 2))
if dis <= 2:
model_img_array[i, h, j] = yu
            elif np.sqrt(pow((11 - i), 2) + pow((14 - j), 2)) <= 2:
                model_img_array[i, h, j] = yu
            elif np.sqrt(pow((9 - i), 2) + pow((19 - j), 2)) <= 2:
model_img_array[i, h, j] = yu
elif np.sqrt(pow((8 - i), 2) + pow((10 - j), 2)) <= 2:
model_img_array[i, h, j] = yu
elif np.sqrt(pow((5 - i), 2) + pow((15 - j), 2)) <= 2:
model_img_array[i, h, j] = yu
elif np.sqrt(pow((2 - i), 2) + pow((10 - j), 2)) <= 2:
model_img_array[i, h, j] = yu
# else:
# print(new_img_array[i, h, j])
model_img = sitk.GetImageFromArray(model_img_array)
model_img.SetSpacing(spacing)
sitk.WriteImage(model_img, "../../data/ground_data/model_img.nii")
img_array = sitk.GetArrayFromImage(sitk.ReadImage(
"../../data/ground_data/0.709_0.5_0.75VVHR_0.236_211_211_111_60itr_1sub.nii"))
model_img_array = sitk.GetArrayFromImage(model_img)
new_img_f = fftn(new_img_array)
model_f = fftn(model_img_array)
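# Estimate the blur transfer function H as the ratio of the measured image
# spectrum to the ideal model spectrum; the small constant avoids division by
# zero at frequencies where the model spectrum vanishes.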
H = new_img_f / (model_f + 0.0001)
h = np.fft.ifftn(H)
h = np.real(h)
h = (h - h.min()) / (h.max() - h.min())
h = h / np.sum(h)
print(np.sum(h))
h_img = sitk.GetImageFromArray(h)
sitk.WriteImage(h_img, "../../data/ground_data/h.nii")
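# Zero-pad the estimated point-spread function h to the size of the image that
# will be deconvolved, so its spectrum can be computed on the same grid.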
h_pad = np.lib.pad(h,
                   ((0, img_array.shape[0] - h.shape[0]),
                    (0, img_array.shape[1] - h.shape[1]),
                    (0, img_array.shape[2] - h.shape[2])),
                   'constant', constant_values=0)
print(h_pad)
H_pad = fftn(h_pad)
img_f = fftn(img_array)
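# Inverse filtering: divide the degraded image spectrum by the padded transfer
# function, with an added offset to keep the division stable where H_pad is
# close to zero.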
# recon = model_f / (H + 0.0001)
recon = img_f / (H_pad + H.any().min() + 0.0001)
# print(recon)
recon = ifftn(recon)
recon = np.real(recon)
# lucy_richoid
# recon = skimage.restoration.deconvolution.richardson_lucy(img_array, h, iterations=3)
recon_img = sitk.GetImageFromArray(recon)
recon_img.SetSpacing(spacing)
sitk.WriteImage(recon_img, "../../data/ground_data/recon.nii")
|
flexible
|
{
"blob_id": "f84ab1530cbc6bd25c45fc607d8f1cd461b180bf",
"index": 2089,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor h in range(11, 41):\n for i in range(model_img_array.shape[0]):\n for j in range(model_img_array.shape[2]):\n dis = np.sqrt(pow(13 - i, 2) + pow(9 - j, 2))\n if dis <= 2:\n model_img_array[i, h, j] = yu\n elif np.sqrt(pow(11 - i, 2) + pow(14 - j, 2)) <= 2:\n model_img_array[i, h, j] = yu\n elif np.sqrt(pow(9 - i, 2) + pow(19 - j, 2)) <= 2:\n model_img_array[i, h, j] = yu\n elif np.sqrt(pow(8 - i, 2) + pow(10 - j, 2)) <= 2:\n model_img_array[i, h, j] = yu\n elif np.sqrt(pow(5 - i, 2) + pow(15 - j, 2)) <= 2:\n model_img_array[i, h, j] = yu\n elif np.sqrt(pow(2 - i, 2) + pow(10 - j, 2)) <= 2:\n model_img_array[i, h, j] = yu\n<mask token>\nmodel_img.SetSpacing(spacing)\nsitk.WriteImage(model_img, '../../data/ground_data/model_img.nii')\n<mask token>\nprint(np.sum(h))\n<mask token>\nsitk.WriteImage(h_img, '../../data/ground_data/h.nii')\n<mask token>\nprint(h_pad)\n<mask token>\nrecon_img.SetSpacing(spacing)\nsitk.WriteImage(recon_img, '../../data/ground_data/recon.nii')\n",
"step-3": "<mask token>\nnew_img = sitk.ReadImage('../../data/ground_data/new_img.nii')\nspacing = new_img.GetSpacing()\nnew_img_array = sitk.GetArrayFromImage(new_img)\nmodel_img_array = np.zeros(new_img_array.shape)\nyu = 0.1\nfor h in range(11, 41):\n for i in range(model_img_array.shape[0]):\n for j in range(model_img_array.shape[2]):\n dis = np.sqrt(pow(13 - i, 2) + pow(9 - j, 2))\n if dis <= 2:\n model_img_array[i, h, j] = yu\n elif np.sqrt(pow(11 - i, 2) + pow(14 - j, 2)) <= 2:\n model_img_array[i, h, j] = yu\n elif np.sqrt(pow(9 - i, 2) + pow(19 - j, 2)) <= 2:\n model_img_array[i, h, j] = yu\n elif np.sqrt(pow(8 - i, 2) + pow(10 - j, 2)) <= 2:\n model_img_array[i, h, j] = yu\n elif np.sqrt(pow(5 - i, 2) + pow(15 - j, 2)) <= 2:\n model_img_array[i, h, j] = yu\n elif np.sqrt(pow(2 - i, 2) + pow(10 - j, 2)) <= 2:\n model_img_array[i, h, j] = yu\nmodel_img = sitk.GetImageFromArray(model_img_array)\nmodel_img.SetSpacing(spacing)\nsitk.WriteImage(model_img, '../../data/ground_data/model_img.nii')\nimg_array = sitk.GetArrayFromImage(sitk.ReadImage(\n '../../data/ground_data/0.709_0.5_0.75VVHR_0.236_211_211_111_60itr_1sub.nii'\n ))\nmodel_img_array = sitk.GetArrayFromImage(model_img)\nnew_img_f = fftn(new_img_array)\nmodel_f = fftn(model_img_array)\nH = new_img_f / (model_f + 0.0001)\nh = np.fft.ifftn(H)\nh = np.real(h)\nh = (h - h.min()) / (h.max() - h.min())\nh = h / np.sum(h)\nprint(np.sum(h))\nh_img = sitk.GetImageFromArray(h)\nsitk.WriteImage(h_img, '../../data/ground_data/h.nii')\nh_pad = np.lib.pad(h, ((0, img_array.shape[0] - h.shape[0]), (0, img_array.\n shape[1] - h.shape[1]), (0, img_array.shape[2] - h.shape[2])),\n 'constant', constant_values=0)\nprint(h_pad)\nH_pad = fftn(h_pad)\nimg_f = fftn(img_array)\nrecon = img_f / (H_pad + H.any().min() + 0.0001)\nrecon = ifftn(recon)\nrecon = np.real(recon)\nrecon_img = sitk.GetImageFromArray(recon)\nrecon_img.SetSpacing(spacing)\nsitk.WriteImage(recon_img, '../../data/ground_data/recon.nii')\n",
"step-4": "<mask token>\nimport numpy as np\nimport SimpleITK as sitk\nimport skimage.restoration.deconvolution\nfrom numpy.fft import fftn, ifftn\nnew_img = sitk.ReadImage('../../data/ground_data/new_img.nii')\nspacing = new_img.GetSpacing()\nnew_img_array = sitk.GetArrayFromImage(new_img)\nmodel_img_array = np.zeros(new_img_array.shape)\nyu = 0.1\nfor h in range(11, 41):\n for i in range(model_img_array.shape[0]):\n for j in range(model_img_array.shape[2]):\n dis = np.sqrt(pow(13 - i, 2) + pow(9 - j, 2))\n if dis <= 2:\n model_img_array[i, h, j] = yu\n elif np.sqrt(pow(11 - i, 2) + pow(14 - j, 2)) <= 2:\n model_img_array[i, h, j] = yu\n elif np.sqrt(pow(9 - i, 2) + pow(19 - j, 2)) <= 2:\n model_img_array[i, h, j] = yu\n elif np.sqrt(pow(8 - i, 2) + pow(10 - j, 2)) <= 2:\n model_img_array[i, h, j] = yu\n elif np.sqrt(pow(5 - i, 2) + pow(15 - j, 2)) <= 2:\n model_img_array[i, h, j] = yu\n elif np.sqrt(pow(2 - i, 2) + pow(10 - j, 2)) <= 2:\n model_img_array[i, h, j] = yu\nmodel_img = sitk.GetImageFromArray(model_img_array)\nmodel_img.SetSpacing(spacing)\nsitk.WriteImage(model_img, '../../data/ground_data/model_img.nii')\nimg_array = sitk.GetArrayFromImage(sitk.ReadImage(\n '../../data/ground_data/0.709_0.5_0.75VVHR_0.236_211_211_111_60itr_1sub.nii'\n ))\nmodel_img_array = sitk.GetArrayFromImage(model_img)\nnew_img_f = fftn(new_img_array)\nmodel_f = fftn(model_img_array)\nH = new_img_f / (model_f + 0.0001)\nh = np.fft.ifftn(H)\nh = np.real(h)\nh = (h - h.min()) / (h.max() - h.min())\nh = h / np.sum(h)\nprint(np.sum(h))\nh_img = sitk.GetImageFromArray(h)\nsitk.WriteImage(h_img, '../../data/ground_data/h.nii')\nh_pad = np.lib.pad(h, ((0, img_array.shape[0] - h.shape[0]), (0, img_array.\n shape[1] - h.shape[1]), (0, img_array.shape[2] - h.shape[2])),\n 'constant', constant_values=0)\nprint(h_pad)\nH_pad = fftn(h_pad)\nimg_f = fftn(img_array)\nrecon = img_f / (H_pad + H.any().min() + 0.0001)\nrecon = ifftn(recon)\nrecon = np.real(recon)\nrecon_img = sitk.GetImageFromArray(recon)\nrecon_img.SetSpacing(spacing)\nsitk.WriteImage(recon_img, '../../data/ground_data/recon.nii')\n",
"step-5": "#!usr/bin/env python\n# -*- coding:utf-8 _*\n\n\"\"\"\n@File : build_model_2.py\n@Author : ljt\n@Description: xx\n@Time : 2021/6/12 21:46 \n\"\"\"\n\n\nimport numpy as np\nimport SimpleITK as sitk\nimport skimage.restoration.deconvolution\nfrom numpy.fft import fftn, ifftn\n\n\nnew_img = sitk.ReadImage(\"../../data/ground_data/new_img.nii\")\nspacing = new_img.GetSpacing()\n# 原始SimpleITK数据的存储形式为(Width, Height, Depth)即(X,Y,Z)\n# 使用GetArrayFromImage()方法后,X轴与Z轴发生了对调\n# 输出形状为:(Depth, Height, Width)即(Z,Y,X)。\nnew_img_array = sitk.GetArrayFromImage(new_img)\n\nmodel_img_array = np.zeros((new_img_array.shape))\n\n\n# [10, 19, 14]\n# [15, 16, 12]\n# [20, 25, 10]\n# [11, 32, 9]\n# [16, 16, 6]\n# [11, 19, 3]\n# h -> 11-41\n\nyu = 0.1\n\nfor h in range(11,41):\n for i in range(model_img_array.shape[0]):\n for j in range(model_img_array.shape[2]):\n dis = np.sqrt(pow((13 - i), 2) + pow((9 - j), 2))\n if dis <= 2:\n model_img_array[i, h, j] = yu\n elif np.sqrt(pow((11 - i), 2) + pow((14 - j), 2)) <=2:\n model_img_array[i, h, j] = yu\n elif np.sqrt(pow((9 - i), 2) + pow((19 - j), 2)) <=2:\n model_img_array[i, h, j] = yu\n elif np.sqrt(pow((8 - i), 2) + pow((10 - j), 2)) <= 2:\n model_img_array[i, h, j] = yu\n elif np.sqrt(pow((5 - i), 2) + pow((15 - j), 2)) <= 2:\n model_img_array[i, h, j] = yu\n elif np.sqrt(pow((2 - i), 2) + pow((10 - j), 2)) <= 2:\n model_img_array[i, h, j] = yu\n # else:\n # print(new_img_array[i, h, j])\n\n\n\nmodel_img = sitk.GetImageFromArray(model_img_array)\nmodel_img.SetSpacing(spacing)\nsitk.WriteImage(model_img, \"../../data/ground_data/model_img.nii\")\n\n\nimg_array = sitk.GetArrayFromImage(sitk.ReadImage(\n \"../../data/ground_data/0.709_0.5_0.75VVHR_0.236_211_211_111_60itr_1sub.nii\"))\n\nmodel_img_array = sitk.GetArrayFromImage(model_img)\nnew_img_f = fftn(new_img_array)\nmodel_f = fftn(model_img_array)\n\nH = new_img_f / (model_f + 0.0001)\nh = np.fft.ifftn(H)\nh = np.real(h)\n\nh = (h -h.min()) / (h.max() - h.min())\n\nh =h / np.sum(h)\nprint(np.sum(h))\nh_img = sitk.GetImageFromArray(h)\nsitk.WriteImage(h_img, \"../../data/ground_data/h.nii\")\nh_pad = np.lib.pad(h, ((0,img_array.shape[0] - h.shape[0]), (0, img_array.shape[1] - h.shape[1]), (0, img_array.shape[2] - h.shape[2])), 'constant', constant_values=(0))\nprint(h_pad)\n\nH_pad = fftn(h_pad)\n\n\n\nimg_f = fftn(img_array)\n\n# recon = model_f / (H + 0.0001)\nrecon = img_f / (H_pad + H.any().min() + 0.0001)\n\n# print(recon)\nrecon = ifftn(recon)\nrecon = np.real(recon)\n\n\n# lucy_richoid\n# recon = skimage.restoration.deconvolution.richardson_lucy(img_array, h, iterations=3)\n\n\nrecon_img = sitk.GetImageFromArray(recon)\nrecon_img.SetSpacing(spacing)\nsitk.WriteImage(recon_img, \"../../data/ground_data/recon.nii\")\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class LogicTests(utils_testcase.TestCase):
def setUp(self):
super(LogicTests, self).setUp()
game_logic.create_test_map()
self.account_1 = self.accounts_factory.create_account()
self.account_1_items = (prototypes.AccountItemsPrototype.
get_by_account_id(self.account_1.id))
self.collection_1 = prototypes.CollectionPrototype.create(caption=
'collection_1', description='description_1')
self.collection_2 = prototypes.CollectionPrototype.create(caption=
'collection_2', description='description_2', approved=True)
self.kit_1 = prototypes.KitPrototype.create(collection=self.
collection_1, caption='kit_1', description='description_1')
self.kit_2 = prototypes.KitPrototype.create(collection=self.
collection_2, caption='kit_2', description='description_2',
approved=True)
self.kit_3 = prototypes.KitPrototype.create(collection=self.
collection_2, caption='kit_3', description='description_3',
approved=True)
self.item_1_1 = prototypes.ItemPrototype.create(kit=self.kit_1,
caption='item_1_1', text='text_1_1', approved=False)
self.item_1_2 = prototypes.ItemPrototype.create(kit=self.kit_1,
caption='item_1_2', text='text_1_2', approved=True)
self.item_2_1 = prototypes.ItemPrototype.create(kit=self.kit_2,
caption='item_2_1', text='text_2_1', approved=True)
self.item_2_2 = prototypes.ItemPrototype.create(kit=self.kit_2,
caption='item_2_2', text='text_2_2', approved=False)
self.item_3_1 = prototypes.ItemPrototype.create(kit=self.kit_3,
caption='item_3_1', text='text_3_1', approved=True)
<|reserved_special_token_0|>
def test_get_items_count__with_account(self):
self.account_1_items.add_item(self.item_3_1)
self.account_1_items.save()
self.assertEqual(logic.get_items_count(prototypes.ItemPrototype.
_db_filter(id__in=self.account_1_items.items_ids())), (
collections.Counter({self.kit_3.id: 1}), {self.collection_2.id: 1})
)
def test_get_collections_statistics__no_account(self):
self.assertEqual(logic.get_collections_statistics(None), {
'total_items_in_collections': {self.collection_2.id: 2},
'total_items_in_kits': collections.Counter({self.kit_2.id: 1,
self.kit_3.id: 1}), 'account_items_in_collections': {},
'account_items_in_kits': {}, 'total_items': 2, 'account_items': 0})
def test_get_collections_statistics__with_account(self):
self.account_1_items.add_item(self.item_3_1)
self.account_1_items.save()
self.assertEqual(logic.get_collections_statistics(self.
account_1_items), {'total_items_in_collections': {self.
collection_2.id: 2}, 'total_items_in_kits': collections.Counter
({self.kit_2.id: 1, self.kit_3.id: 1}),
'account_items_in_collections': {self.collection_2.id: 1},
'account_items_in_kits': collections.Counter({self.kit_3.id: 1}
), 'total_items': 2, 'account_items': 1})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LogicTests(utils_testcase.TestCase):
def setUp(self):
super(LogicTests, self).setUp()
game_logic.create_test_map()
self.account_1 = self.accounts_factory.create_account()
self.account_1_items = (prototypes.AccountItemsPrototype.
get_by_account_id(self.account_1.id))
self.collection_1 = prototypes.CollectionPrototype.create(caption=
'collection_1', description='description_1')
self.collection_2 = prototypes.CollectionPrototype.create(caption=
'collection_2', description='description_2', approved=True)
self.kit_1 = prototypes.KitPrototype.create(collection=self.
collection_1, caption='kit_1', description='description_1')
self.kit_2 = prototypes.KitPrototype.create(collection=self.
collection_2, caption='kit_2', description='description_2',
approved=True)
self.kit_3 = prototypes.KitPrototype.create(collection=self.
collection_2, caption='kit_3', description='description_3',
approved=True)
self.item_1_1 = prototypes.ItemPrototype.create(kit=self.kit_1,
caption='item_1_1', text='text_1_1', approved=False)
self.item_1_2 = prototypes.ItemPrototype.create(kit=self.kit_1,
caption='item_1_2', text='text_1_2', approved=True)
self.item_2_1 = prototypes.ItemPrototype.create(kit=self.kit_2,
caption='item_2_1', text='text_2_1', approved=True)
self.item_2_2 = prototypes.ItemPrototype.create(kit=self.kit_2,
caption='item_2_2', text='text_2_2', approved=False)
self.item_3_1 = prototypes.ItemPrototype.create(kit=self.kit_3,
caption='item_3_1', text='text_3_1', approved=True)
def test_get_items_count(self):
self.assertEqual(logic.get_items_count(prototypes.ItemPrototype.
_db_all()), (collections.Counter({self.kit_2.id: 1, self.kit_3.
id: 1}), {self.collection_2.id: 2}))
def test_get_items_count__with_account(self):
self.account_1_items.add_item(self.item_3_1)
self.account_1_items.save()
self.assertEqual(logic.get_items_count(prototypes.ItemPrototype.
_db_filter(id__in=self.account_1_items.items_ids())), (
collections.Counter({self.kit_3.id: 1}), {self.collection_2.id: 1})
)
def test_get_collections_statistics__no_account(self):
self.assertEqual(logic.get_collections_statistics(None), {
'total_items_in_collections': {self.collection_2.id: 2},
'total_items_in_kits': collections.Counter({self.kit_2.id: 1,
self.kit_3.id: 1}), 'account_items_in_collections': {},
'account_items_in_kits': {}, 'total_items': 2, 'account_items': 0})
def test_get_collections_statistics__with_account(self):
self.account_1_items.add_item(self.item_3_1)
self.account_1_items.save()
self.assertEqual(logic.get_collections_statistics(self.
account_1_items), {'total_items_in_collections': {self.
collection_2.id: 2}, 'total_items_in_kits': collections.Counter
({self.kit_2.id: 1, self.kit_3.id: 1}),
'account_items_in_collections': {self.collection_2.id: 1},
'account_items_in_kits': collections.Counter({self.kit_3.id: 1}
), 'total_items': 2, 'account_items': 1})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
smart_imports.all()
class LogicTests(utils_testcase.TestCase):
def setUp(self):
super(LogicTests, self).setUp()
game_logic.create_test_map()
self.account_1 = self.accounts_factory.create_account()
self.account_1_items = (prototypes.AccountItemsPrototype.
get_by_account_id(self.account_1.id))
self.collection_1 = prototypes.CollectionPrototype.create(caption=
'collection_1', description='description_1')
self.collection_2 = prototypes.CollectionPrototype.create(caption=
'collection_2', description='description_2', approved=True)
self.kit_1 = prototypes.KitPrototype.create(collection=self.
collection_1, caption='kit_1', description='description_1')
self.kit_2 = prototypes.KitPrototype.create(collection=self.
collection_2, caption='kit_2', description='description_2',
approved=True)
self.kit_3 = prototypes.KitPrototype.create(collection=self.
collection_2, caption='kit_3', description='description_3',
approved=True)
self.item_1_1 = prototypes.ItemPrototype.create(kit=self.kit_1,
caption='item_1_1', text='text_1_1', approved=False)
self.item_1_2 = prototypes.ItemPrototype.create(kit=self.kit_1,
caption='item_1_2', text='text_1_2', approved=True)
self.item_2_1 = prototypes.ItemPrototype.create(kit=self.kit_2,
caption='item_2_1', text='text_2_1', approved=True)
self.item_2_2 = prototypes.ItemPrototype.create(kit=self.kit_2,
caption='item_2_2', text='text_2_2', approved=False)
self.item_3_1 = prototypes.ItemPrototype.create(kit=self.kit_3,
caption='item_3_1', text='text_3_1', approved=True)
def test_get_items_count(self):
self.assertEqual(logic.get_items_count(prototypes.ItemPrototype.
_db_all()), (collections.Counter({self.kit_2.id: 1, self.kit_3.
id: 1}), {self.collection_2.id: 2}))
def test_get_items_count__with_account(self):
self.account_1_items.add_item(self.item_3_1)
self.account_1_items.save()
self.assertEqual(logic.get_items_count(prototypes.ItemPrototype.
_db_filter(id__in=self.account_1_items.items_ids())), (
collections.Counter({self.kit_3.id: 1}), {self.collection_2.id: 1})
)
def test_get_collections_statistics__no_account(self):
self.assertEqual(logic.get_collections_statistics(None), {
'total_items_in_collections': {self.collection_2.id: 2},
'total_items_in_kits': collections.Counter({self.kit_2.id: 1,
self.kit_3.id: 1}), 'account_items_in_collections': {},
'account_items_in_kits': {}, 'total_items': 2, 'account_items': 0})
def test_get_collections_statistics__with_account(self):
self.account_1_items.add_item(self.item_3_1)
self.account_1_items.save()
self.assertEqual(logic.get_collections_statistics(self.
account_1_items), {'total_items_in_collections': {self.
collection_2.id: 2}, 'total_items_in_kits': collections.Counter
({self.kit_2.id: 1, self.kit_3.id: 1}),
'account_items_in_collections': {self.collection_2.id: 1},
'account_items_in_kits': collections.Counter({self.kit_3.id: 1}
), 'total_items': 2, 'account_items': 1})
<|reserved_special_token_1|>
import smart_imports
smart_imports.all()
class LogicTests(utils_testcase.TestCase):
def setUp(self):
super(LogicTests, self).setUp()
game_logic.create_test_map()
self.account_1 = self.accounts_factory.create_account()
self.account_1_items = (prototypes.AccountItemsPrototype.
get_by_account_id(self.account_1.id))
self.collection_1 = prototypes.CollectionPrototype.create(caption=
'collection_1', description='description_1')
self.collection_2 = prototypes.CollectionPrototype.create(caption=
'collection_2', description='description_2', approved=True)
self.kit_1 = prototypes.KitPrototype.create(collection=self.
collection_1, caption='kit_1', description='description_1')
self.kit_2 = prototypes.KitPrototype.create(collection=self.
collection_2, caption='kit_2', description='description_2',
approved=True)
self.kit_3 = prototypes.KitPrototype.create(collection=self.
collection_2, caption='kit_3', description='description_3',
approved=True)
self.item_1_1 = prototypes.ItemPrototype.create(kit=self.kit_1,
caption='item_1_1', text='text_1_1', approved=False)
self.item_1_2 = prototypes.ItemPrototype.create(kit=self.kit_1,
caption='item_1_2', text='text_1_2', approved=True)
self.item_2_1 = prototypes.ItemPrototype.create(kit=self.kit_2,
caption='item_2_1', text='text_2_1', approved=True)
self.item_2_2 = prototypes.ItemPrototype.create(kit=self.kit_2,
caption='item_2_2', text='text_2_2', approved=False)
self.item_3_1 = prototypes.ItemPrototype.create(kit=self.kit_3,
caption='item_3_1', text='text_3_1', approved=True)
def test_get_items_count(self):
self.assertEqual(logic.get_items_count(prototypes.ItemPrototype.
_db_all()), (collections.Counter({self.kit_2.id: 1, self.kit_3.
id: 1}), {self.collection_2.id: 2}))
def test_get_items_count__with_account(self):
self.account_1_items.add_item(self.item_3_1)
self.account_1_items.save()
self.assertEqual(logic.get_items_count(prototypes.ItemPrototype.
_db_filter(id__in=self.account_1_items.items_ids())), (
collections.Counter({self.kit_3.id: 1}), {self.collection_2.id: 1})
)
def test_get_collections_statistics__no_account(self):
self.assertEqual(logic.get_collections_statistics(None), {
'total_items_in_collections': {self.collection_2.id: 2},
'total_items_in_kits': collections.Counter({self.kit_2.id: 1,
self.kit_3.id: 1}), 'account_items_in_collections': {},
'account_items_in_kits': {}, 'total_items': 2, 'account_items': 0})
def test_get_collections_statistics__with_account(self):
self.account_1_items.add_item(self.item_3_1)
self.account_1_items.save()
self.assertEqual(logic.get_collections_statistics(self.
account_1_items), {'total_items_in_collections': {self.
collection_2.id: 2}, 'total_items_in_kits': collections.Counter
({self.kit_2.id: 1, self.kit_3.id: 1}),
'account_items_in_collections': {self.collection_2.id: 1},
'account_items_in_kits': collections.Counter({self.kit_3.id: 1}
), 'total_items': 2, 'account_items': 1})
<|reserved_special_token_1|>
import smart_imports
smart_imports.all()
class LogicTests(utils_testcase.TestCase):
def setUp(self):
super(LogicTests, self).setUp()
game_logic.create_test_map()
self.account_1 = self.accounts_factory.create_account()
self.account_1_items = prototypes.AccountItemsPrototype.get_by_account_id(self.account_1.id)
self.collection_1 = prototypes.CollectionPrototype.create(caption='collection_1', description='description_1')
self.collection_2 = prototypes.CollectionPrototype.create(caption='collection_2', description='description_2', approved=True)
self.kit_1 = prototypes.KitPrototype.create(collection=self.collection_1, caption='kit_1', description='description_1')
self.kit_2 = prototypes.KitPrototype.create(collection=self.collection_2, caption='kit_2', description='description_2', approved=True)
self.kit_3 = prototypes.KitPrototype.create(collection=self.collection_2, caption='kit_3', description='description_3', approved=True)
self.item_1_1 = prototypes.ItemPrototype.create(kit=self.kit_1, caption='item_1_1', text='text_1_1', approved=False)
self.item_1_2 = prototypes.ItemPrototype.create(kit=self.kit_1, caption='item_1_2', text='text_1_2', approved=True)
self.item_2_1 = prototypes.ItemPrototype.create(kit=self.kit_2, caption='item_2_1', text='text_2_1', approved=True)
self.item_2_2 = prototypes.ItemPrototype.create(kit=self.kit_2, caption='item_2_2', text='text_2_2', approved=False)
self.item_3_1 = prototypes.ItemPrototype.create(kit=self.kit_3, caption='item_3_1', text='text_3_1', approved=True)
def test_get_items_count(self):
self.assertEqual(logic.get_items_count(prototypes.ItemPrototype._db_all()),
(collections.Counter({self.kit_2.id: 1, self.kit_3.id: 1}), {self.collection_2.id: 2}))
def test_get_items_count__with_account(self):
self.account_1_items.add_item(self.item_3_1)
self.account_1_items.save()
self.assertEqual(logic.get_items_count(prototypes.ItemPrototype._db_filter(id__in=self.account_1_items.items_ids())),
(collections.Counter({self.kit_3.id: 1}), {self.collection_2.id: 1}))
def test_get_collections_statistics__no_account(self):
self.assertEqual(logic.get_collections_statistics(None),
{'total_items_in_collections': {self.collection_2.id: 2},
'total_items_in_kits': collections.Counter({self.kit_2.id: 1, self.kit_3.id: 1}),
'account_items_in_collections': {},
'account_items_in_kits': {},
'total_items': 2,
'account_items': 0})
def test_get_collections_statistics__with_account(self):
self.account_1_items.add_item(self.item_3_1)
self.account_1_items.save()
self.assertEqual(logic.get_collections_statistics(self.account_1_items),
{'total_items_in_collections': {self.collection_2.id: 2},
'total_items_in_kits': collections.Counter({self.kit_2.id: 1, self.kit_3.id: 1}),
'account_items_in_collections': {self.collection_2.id: 1},
'account_items_in_kits': collections.Counter({self.kit_3.id: 1}),
'total_items': 2,
'account_items': 1})
|
flexible
|
{
"blob_id": "89e5e82c073f7f87c00fc844c861c6c5cbe6a695",
"index": 8893,
"step-1": "<mask token>\n\n\nclass LogicTests(utils_testcase.TestCase):\n\n def setUp(self):\n super(LogicTests, self).setUp()\n game_logic.create_test_map()\n self.account_1 = self.accounts_factory.create_account()\n self.account_1_items = (prototypes.AccountItemsPrototype.\n get_by_account_id(self.account_1.id))\n self.collection_1 = prototypes.CollectionPrototype.create(caption=\n 'collection_1', description='description_1')\n self.collection_2 = prototypes.CollectionPrototype.create(caption=\n 'collection_2', description='description_2', approved=True)\n self.kit_1 = prototypes.KitPrototype.create(collection=self.\n collection_1, caption='kit_1', description='description_1')\n self.kit_2 = prototypes.KitPrototype.create(collection=self.\n collection_2, caption='kit_2', description='description_2',\n approved=True)\n self.kit_3 = prototypes.KitPrototype.create(collection=self.\n collection_2, caption='kit_3', description='description_3',\n approved=True)\n self.item_1_1 = prototypes.ItemPrototype.create(kit=self.kit_1,\n caption='item_1_1', text='text_1_1', approved=False)\n self.item_1_2 = prototypes.ItemPrototype.create(kit=self.kit_1,\n caption='item_1_2', text='text_1_2', approved=True)\n self.item_2_1 = prototypes.ItemPrototype.create(kit=self.kit_2,\n caption='item_2_1', text='text_2_1', approved=True)\n self.item_2_2 = prototypes.ItemPrototype.create(kit=self.kit_2,\n caption='item_2_2', text='text_2_2', approved=False)\n self.item_3_1 = prototypes.ItemPrototype.create(kit=self.kit_3,\n caption='item_3_1', text='text_3_1', approved=True)\n <mask token>\n\n def test_get_items_count__with_account(self):\n self.account_1_items.add_item(self.item_3_1)\n self.account_1_items.save()\n self.assertEqual(logic.get_items_count(prototypes.ItemPrototype.\n _db_filter(id__in=self.account_1_items.items_ids())), (\n collections.Counter({self.kit_3.id: 1}), {self.collection_2.id: 1})\n )\n\n def test_get_collections_statistics__no_account(self):\n self.assertEqual(logic.get_collections_statistics(None), {\n 'total_items_in_collections': {self.collection_2.id: 2},\n 'total_items_in_kits': collections.Counter({self.kit_2.id: 1,\n self.kit_3.id: 1}), 'account_items_in_collections': {},\n 'account_items_in_kits': {}, 'total_items': 2, 'account_items': 0})\n\n def test_get_collections_statistics__with_account(self):\n self.account_1_items.add_item(self.item_3_1)\n self.account_1_items.save()\n self.assertEqual(logic.get_collections_statistics(self.\n account_1_items), {'total_items_in_collections': {self.\n collection_2.id: 2}, 'total_items_in_kits': collections.Counter\n ({self.kit_2.id: 1, self.kit_3.id: 1}),\n 'account_items_in_collections': {self.collection_2.id: 1},\n 'account_items_in_kits': collections.Counter({self.kit_3.id: 1}\n ), 'total_items': 2, 'account_items': 1})\n",
"step-2": "<mask token>\n\n\nclass LogicTests(utils_testcase.TestCase):\n\n def setUp(self):\n super(LogicTests, self).setUp()\n game_logic.create_test_map()\n self.account_1 = self.accounts_factory.create_account()\n self.account_1_items = (prototypes.AccountItemsPrototype.\n get_by_account_id(self.account_1.id))\n self.collection_1 = prototypes.CollectionPrototype.create(caption=\n 'collection_1', description='description_1')\n self.collection_2 = prototypes.CollectionPrototype.create(caption=\n 'collection_2', description='description_2', approved=True)\n self.kit_1 = prototypes.KitPrototype.create(collection=self.\n collection_1, caption='kit_1', description='description_1')\n self.kit_2 = prototypes.KitPrototype.create(collection=self.\n collection_2, caption='kit_2', description='description_2',\n approved=True)\n self.kit_3 = prototypes.KitPrototype.create(collection=self.\n collection_2, caption='kit_3', description='description_3',\n approved=True)\n self.item_1_1 = prototypes.ItemPrototype.create(kit=self.kit_1,\n caption='item_1_1', text='text_1_1', approved=False)\n self.item_1_2 = prototypes.ItemPrototype.create(kit=self.kit_1,\n caption='item_1_2', text='text_1_2', approved=True)\n self.item_2_1 = prototypes.ItemPrototype.create(kit=self.kit_2,\n caption='item_2_1', text='text_2_1', approved=True)\n self.item_2_2 = prototypes.ItemPrototype.create(kit=self.kit_2,\n caption='item_2_2', text='text_2_2', approved=False)\n self.item_3_1 = prototypes.ItemPrototype.create(kit=self.kit_3,\n caption='item_3_1', text='text_3_1', approved=True)\n\n def test_get_items_count(self):\n self.assertEqual(logic.get_items_count(prototypes.ItemPrototype.\n _db_all()), (collections.Counter({self.kit_2.id: 1, self.kit_3.\n id: 1}), {self.collection_2.id: 2}))\n\n def test_get_items_count__with_account(self):\n self.account_1_items.add_item(self.item_3_1)\n self.account_1_items.save()\n self.assertEqual(logic.get_items_count(prototypes.ItemPrototype.\n _db_filter(id__in=self.account_1_items.items_ids())), (\n collections.Counter({self.kit_3.id: 1}), {self.collection_2.id: 1})\n )\n\n def test_get_collections_statistics__no_account(self):\n self.assertEqual(logic.get_collections_statistics(None), {\n 'total_items_in_collections': {self.collection_2.id: 2},\n 'total_items_in_kits': collections.Counter({self.kit_2.id: 1,\n self.kit_3.id: 1}), 'account_items_in_collections': {},\n 'account_items_in_kits': {}, 'total_items': 2, 'account_items': 0})\n\n def test_get_collections_statistics__with_account(self):\n self.account_1_items.add_item(self.item_3_1)\n self.account_1_items.save()\n self.assertEqual(logic.get_collections_statistics(self.\n account_1_items), {'total_items_in_collections': {self.\n collection_2.id: 2}, 'total_items_in_kits': collections.Counter\n ({self.kit_2.id: 1, self.kit_3.id: 1}),\n 'account_items_in_collections': {self.collection_2.id: 1},\n 'account_items_in_kits': collections.Counter({self.kit_3.id: 1}\n ), 'total_items': 2, 'account_items': 1})\n",
"step-3": "<mask token>\nsmart_imports.all()\n\n\nclass LogicTests(utils_testcase.TestCase):\n\n def setUp(self):\n super(LogicTests, self).setUp()\n game_logic.create_test_map()\n self.account_1 = self.accounts_factory.create_account()\n self.account_1_items = (prototypes.AccountItemsPrototype.\n get_by_account_id(self.account_1.id))\n self.collection_1 = prototypes.CollectionPrototype.create(caption=\n 'collection_1', description='description_1')\n self.collection_2 = prototypes.CollectionPrototype.create(caption=\n 'collection_2', description='description_2', approved=True)\n self.kit_1 = prototypes.KitPrototype.create(collection=self.\n collection_1, caption='kit_1', description='description_1')\n self.kit_2 = prototypes.KitPrototype.create(collection=self.\n collection_2, caption='kit_2', description='description_2',\n approved=True)\n self.kit_3 = prototypes.KitPrototype.create(collection=self.\n collection_2, caption='kit_3', description='description_3',\n approved=True)\n self.item_1_1 = prototypes.ItemPrototype.create(kit=self.kit_1,\n caption='item_1_1', text='text_1_1', approved=False)\n self.item_1_2 = prototypes.ItemPrototype.create(kit=self.kit_1,\n caption='item_1_2', text='text_1_2', approved=True)\n self.item_2_1 = prototypes.ItemPrototype.create(kit=self.kit_2,\n caption='item_2_1', text='text_2_1', approved=True)\n self.item_2_2 = prototypes.ItemPrototype.create(kit=self.kit_2,\n caption='item_2_2', text='text_2_2', approved=False)\n self.item_3_1 = prototypes.ItemPrototype.create(kit=self.kit_3,\n caption='item_3_1', text='text_3_1', approved=True)\n\n def test_get_items_count(self):\n self.assertEqual(logic.get_items_count(prototypes.ItemPrototype.\n _db_all()), (collections.Counter({self.kit_2.id: 1, self.kit_3.\n id: 1}), {self.collection_2.id: 2}))\n\n def test_get_items_count__with_account(self):\n self.account_1_items.add_item(self.item_3_1)\n self.account_1_items.save()\n self.assertEqual(logic.get_items_count(prototypes.ItemPrototype.\n _db_filter(id__in=self.account_1_items.items_ids())), (\n collections.Counter({self.kit_3.id: 1}), {self.collection_2.id: 1})\n )\n\n def test_get_collections_statistics__no_account(self):\n self.assertEqual(logic.get_collections_statistics(None), {\n 'total_items_in_collections': {self.collection_2.id: 2},\n 'total_items_in_kits': collections.Counter({self.kit_2.id: 1,\n self.kit_3.id: 1}), 'account_items_in_collections': {},\n 'account_items_in_kits': {}, 'total_items': 2, 'account_items': 0})\n\n def test_get_collections_statistics__with_account(self):\n self.account_1_items.add_item(self.item_3_1)\n self.account_1_items.save()\n self.assertEqual(logic.get_collections_statistics(self.\n account_1_items), {'total_items_in_collections': {self.\n collection_2.id: 2}, 'total_items_in_kits': collections.Counter\n ({self.kit_2.id: 1, self.kit_3.id: 1}),\n 'account_items_in_collections': {self.collection_2.id: 1},\n 'account_items_in_kits': collections.Counter({self.kit_3.id: 1}\n ), 'total_items': 2, 'account_items': 1})\n",
"step-4": "import smart_imports\nsmart_imports.all()\n\n\nclass LogicTests(utils_testcase.TestCase):\n\n def setUp(self):\n super(LogicTests, self).setUp()\n game_logic.create_test_map()\n self.account_1 = self.accounts_factory.create_account()\n self.account_1_items = (prototypes.AccountItemsPrototype.\n get_by_account_id(self.account_1.id))\n self.collection_1 = prototypes.CollectionPrototype.create(caption=\n 'collection_1', description='description_1')\n self.collection_2 = prototypes.CollectionPrototype.create(caption=\n 'collection_2', description='description_2', approved=True)\n self.kit_1 = prototypes.KitPrototype.create(collection=self.\n collection_1, caption='kit_1', description='description_1')\n self.kit_2 = prototypes.KitPrototype.create(collection=self.\n collection_2, caption='kit_2', description='description_2',\n approved=True)\n self.kit_3 = prototypes.KitPrototype.create(collection=self.\n collection_2, caption='kit_3', description='description_3',\n approved=True)\n self.item_1_1 = prototypes.ItemPrototype.create(kit=self.kit_1,\n caption='item_1_1', text='text_1_1', approved=False)\n self.item_1_2 = prototypes.ItemPrototype.create(kit=self.kit_1,\n caption='item_1_2', text='text_1_2', approved=True)\n self.item_2_1 = prototypes.ItemPrototype.create(kit=self.kit_2,\n caption='item_2_1', text='text_2_1', approved=True)\n self.item_2_2 = prototypes.ItemPrototype.create(kit=self.kit_2,\n caption='item_2_2', text='text_2_2', approved=False)\n self.item_3_1 = prototypes.ItemPrototype.create(kit=self.kit_3,\n caption='item_3_1', text='text_3_1', approved=True)\n\n def test_get_items_count(self):\n self.assertEqual(logic.get_items_count(prototypes.ItemPrototype.\n _db_all()), (collections.Counter({self.kit_2.id: 1, self.kit_3.\n id: 1}), {self.collection_2.id: 2}))\n\n def test_get_items_count__with_account(self):\n self.account_1_items.add_item(self.item_3_1)\n self.account_1_items.save()\n self.assertEqual(logic.get_items_count(prototypes.ItemPrototype.\n _db_filter(id__in=self.account_1_items.items_ids())), (\n collections.Counter({self.kit_3.id: 1}), {self.collection_2.id: 1})\n )\n\n def test_get_collections_statistics__no_account(self):\n self.assertEqual(logic.get_collections_statistics(None), {\n 'total_items_in_collections': {self.collection_2.id: 2},\n 'total_items_in_kits': collections.Counter({self.kit_2.id: 1,\n self.kit_3.id: 1}), 'account_items_in_collections': {},\n 'account_items_in_kits': {}, 'total_items': 2, 'account_items': 0})\n\n def test_get_collections_statistics__with_account(self):\n self.account_1_items.add_item(self.item_3_1)\n self.account_1_items.save()\n self.assertEqual(logic.get_collections_statistics(self.\n account_1_items), {'total_items_in_collections': {self.\n collection_2.id: 2}, 'total_items_in_kits': collections.Counter\n ({self.kit_2.id: 1, self.kit_3.id: 1}),\n 'account_items_in_collections': {self.collection_2.id: 1},\n 'account_items_in_kits': collections.Counter({self.kit_3.id: 1}\n ), 'total_items': 2, 'account_items': 1})\n",
"step-5": "\nimport smart_imports\n\nsmart_imports.all()\n\n\nclass LogicTests(utils_testcase.TestCase):\n\n def setUp(self):\n super(LogicTests, self).setUp()\n\n game_logic.create_test_map()\n\n self.account_1 = self.accounts_factory.create_account()\n\n self.account_1_items = prototypes.AccountItemsPrototype.get_by_account_id(self.account_1.id)\n\n self.collection_1 = prototypes.CollectionPrototype.create(caption='collection_1', description='description_1')\n self.collection_2 = prototypes.CollectionPrototype.create(caption='collection_2', description='description_2', approved=True)\n\n self.kit_1 = prototypes.KitPrototype.create(collection=self.collection_1, caption='kit_1', description='description_1')\n self.kit_2 = prototypes.KitPrototype.create(collection=self.collection_2, caption='kit_2', description='description_2', approved=True)\n self.kit_3 = prototypes.KitPrototype.create(collection=self.collection_2, caption='kit_3', description='description_3', approved=True)\n\n self.item_1_1 = prototypes.ItemPrototype.create(kit=self.kit_1, caption='item_1_1', text='text_1_1', approved=False)\n self.item_1_2 = prototypes.ItemPrototype.create(kit=self.kit_1, caption='item_1_2', text='text_1_2', approved=True)\n self.item_2_1 = prototypes.ItemPrototype.create(kit=self.kit_2, caption='item_2_1', text='text_2_1', approved=True)\n self.item_2_2 = prototypes.ItemPrototype.create(kit=self.kit_2, caption='item_2_2', text='text_2_2', approved=False)\n self.item_3_1 = prototypes.ItemPrototype.create(kit=self.kit_3, caption='item_3_1', text='text_3_1', approved=True)\n\n def test_get_items_count(self):\n self.assertEqual(logic.get_items_count(prototypes.ItemPrototype._db_all()),\n (collections.Counter({self.kit_2.id: 1, self.kit_3.id: 1}), {self.collection_2.id: 2}))\n\n def test_get_items_count__with_account(self):\n self.account_1_items.add_item(self.item_3_1)\n self.account_1_items.save()\n\n self.assertEqual(logic.get_items_count(prototypes.ItemPrototype._db_filter(id__in=self.account_1_items.items_ids())),\n (collections.Counter({self.kit_3.id: 1}), {self.collection_2.id: 1}))\n\n def test_get_collections_statistics__no_account(self):\n self.assertEqual(logic.get_collections_statistics(None),\n {'total_items_in_collections': {self.collection_2.id: 2},\n 'total_items_in_kits': collections.Counter({self.kit_2.id: 1, self.kit_3.id: 1}),\n 'account_items_in_collections': {},\n 'account_items_in_kits': {},\n 'total_items': 2,\n 'account_items': 0})\n\n def test_get_collections_statistics__with_account(self):\n\n self.account_1_items.add_item(self.item_3_1)\n self.account_1_items.save()\n\n self.assertEqual(logic.get_collections_statistics(self.account_1_items),\n {'total_items_in_collections': {self.collection_2.id: 2},\n 'total_items_in_kits': collections.Counter({self.kit_2.id: 1, self.kit_3.id: 1}),\n 'account_items_in_collections': {self.collection_2.id: 1},\n 'account_items_in_kits': collections.Counter({self.kit_3.id: 1}),\n 'total_items': 2,\n 'account_items': 1})\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
class Ball:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Ball:
<|reserved_special_token_0|>
def draw(self):
self.canvas.move(self.id, self.x, self.y)
pos = self.canvas.coords(self.id)
if pos[1] <= 0:
self.y = 3
if pos[3] >= self.canvas_height:
self.y = -3
if self.hit_paddle(pos) == True:
self.y = -3
if pos[0] <= 0:
self.x = 3
if pos[2] >= self.canvas_width:
self.x = -3
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Ball:
def __init__(self, canvas, paddle, color):
self.canvas = canvas
self.paddle = paddle
self.id = canvas.create_oval(10, 10, 25, 25, fill=color)
self.canvas.move(self.id, 245, 100)
starts = [-3, -2, -1, 1, 2, 3]
random.shuffle(starts)
self.x = starts[0]
self.y = -3
self.canvas_height = self.canvas.winfo_height()
self.canvas_width = self.canvas.winfo_width()
def draw(self):
self.canvas.move(self.id, self.x, self.y)
pos = self.canvas.coords(self.id)
if pos[1] <= 0:
self.y = 3
if pos[3] >= self.canvas_height:
self.y = -3
if self.hit_paddle(pos) == True:
self.y = -3
if pos[0] <= 0:
self.x = 3
if pos[2] >= self.canvas_width:
self.x = -3
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Ball:
def __init__(self, canvas, paddle, color):
self.canvas = canvas
self.paddle = paddle
self.id = canvas.create_oval(10, 10, 25, 25, fill=color)
self.canvas.move(self.id, 245, 100)
starts = [-3, -2, -1, 1, 2, 3]
random.shuffle(starts)
self.x = starts[0]
self.y = -3
self.canvas_height = self.canvas.winfo_height()
self.canvas_width = self.canvas.winfo_width()
def draw(self):
self.canvas.move(self.id, self.x, self.y)
pos = self.canvas.coords(self.id)
if pos[1] <= 0:
self.y = 3
if pos[3] >= self.canvas_height:
self.y = -3
if self.hit_paddle(pos) == True:
self.y = -3
if pos[0] <= 0:
self.x = 3
if pos[2] >= self.canvas_width:
self.x = -3
while 1:
ball.draw()
tk.update_idletasks()
tk.update()
time.sleep(0.01)
<|reserved_special_token_1|>
#!/usr/bin/env python3
# coding:utf-8
# Improved little red ball
class Ball:
def __init__(self, canvas, paddle, color):
self.canvas = canvas
self.paddle = paddle
self.id = canvas.create_oval(10, 10, 25, 25, fill=color)
self.canvas.move(self.id, 245, 100)
starts = [-3, -2, -1, 1, 2, 3]
        random.shuffle(starts)  # shuffle the list of starting speeds
self.x = starts[0]
self.y = -3
        self.canvas_height = self.canvas.winfo_height()  # get the canvas height
        self.canvas_width = self.canvas.winfo_width()  # get the canvas width
def draw(self):
self.canvas.move(self.id, self.x, self.y)
        pos = self.canvas.coords(self.id)  # get the ball's current coordinates
if pos[1] <= 0:
self.y = 3
if pos[3] >= self.canvas_height:
self.y = -3
if self.hit_paddle(pos) == True:
self.y = -3
if pos[0] <= 0:
self.x = 3
if pos[2] >= self.canvas_width:
self.x = -3
# Drive the ball from the main loop
while 1:
ball.draw()
tk.update_idletasks()
tk.update()
time.sleep(0.01)
|
flexible
|
{
"blob_id": "cb1e73d172314c8d3d31f6e49fa67582375c0c58",
"index": 7183,
"step-1": "class Ball:\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "class Ball:\n <mask token>\n\n def draw(self):\n self.canvas.move(self.id, self.x, self.y)\n pos = self.canvas.coords(self.id)\n if pos[1] <= 0:\n self.y = 3\n if pos[3] >= self.canvas_height:\n self.y = -3\n if self.hit_paddle(pos) == True:\n self.y = -3\n if pos[0] <= 0:\n self.x = 3\n if pos[2] >= self.canvas_width:\n self.x = -3\n\n\n<mask token>\n",
"step-3": "class Ball:\n\n def __init__(self, canvas, paddle, color):\n self.canvas = canvas\n self.paddle = paddle\n self.id = canvas.create_oval(10, 10, 25, 25, fill=color)\n self.canvas.move(self.id, 245, 100)\n starts = [-3, -2, -1, 1, 2, 3]\n random.shuffle(starts)\n self.x = starts[0]\n self.y = -3\n self.canvas_height = self.canvas.winfo_height()\n self.canvas_width = self.canvas.winfo_width()\n\n def draw(self):\n self.canvas.move(self.id, self.x, self.y)\n pos = self.canvas.coords(self.id)\n if pos[1] <= 0:\n self.y = 3\n if pos[3] >= self.canvas_height:\n self.y = -3\n if self.hit_paddle(pos) == True:\n self.y = -3\n if pos[0] <= 0:\n self.x = 3\n if pos[2] >= self.canvas_width:\n self.x = -3\n\n\n<mask token>\n",
"step-4": "class Ball:\n\n def __init__(self, canvas, paddle, color):\n self.canvas = canvas\n self.paddle = paddle\n self.id = canvas.create_oval(10, 10, 25, 25, fill=color)\n self.canvas.move(self.id, 245, 100)\n starts = [-3, -2, -1, 1, 2, 3]\n random.shuffle(starts)\n self.x = starts[0]\n self.y = -3\n self.canvas_height = self.canvas.winfo_height()\n self.canvas_width = self.canvas.winfo_width()\n\n def draw(self):\n self.canvas.move(self.id, self.x, self.y)\n pos = self.canvas.coords(self.id)\n if pos[1] <= 0:\n self.y = 3\n if pos[3] >= self.canvas_height:\n self.y = -3\n if self.hit_paddle(pos) == True:\n self.y = -3\n if pos[0] <= 0:\n self.x = 3\n if pos[2] >= self.canvas_width:\n self.x = -3\n\n\nwhile 1:\n ball.draw()\n tk.update_idletasks()\n tk.update()\n time.sleep(0.01)\n",
"step-5": "#!/usr/bin/env python3\n# coding:utf-8\n\n# 改进小红球\nclass Ball:\n def __init__(self, canvas, paddle, color):\n self.canvas = canvas\n self.paddle = paddle\n self.id = canvas.create_oval(10, 10, 25, 25, fill=color)\n self.canvas.move(self.id, 245, 100)\n starts = [-3, -2, -1, 1, 2, 3]\n random.shuffle(starts) # 打乱 starts\n self.x = starts[0]\n self.y = -3\n self.canvas_height = self.canvas.winfo_height() # 获取高度坐标\n self.canvas_width = self.canvas.winfo_width() # 获取宽度坐标\n\n def draw(self):\n self.canvas.move(self.id, self.x, self.y)\n pos = self.canvas.coords(self.id) # 获取坐标\n if pos[1] <= 0:\n self.y = 3\n if pos[3] >= self.canvas_height:\n self.y = -3\n if self.hit_paddle(pos) == True:\n self.y = -3\n if pos[0] <= 0:\n self.x = 3\n if pos[2] >= self.canvas_width:\n self.x = -3\n\n# 把小球加入主循环\nwhile 1:\n ball.draw()\n tk.update_idletasks()\n tk.update()\n time.sleep(0.01)\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import pytest
from time import sleep
from timeflux.helpers.background import Task
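# These tests exercise timeflux's background Task helper: a bound worker method
# is launched with start(), polled through done and status(), and can be
# interrupted with stop().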
class DummyWorker:
    def echo(self, message='hello', delay=0, fail=False):
        sleep(delay)
        if fail:
            raise Exception('failed')
        self.message = message
        return self.message
def test_default(working_path):
task = Task(DummyWorker(), 'echo').start()
while not task.done:
status = task.status()
assert status['result'] == 'hello'
assert status['instance'].message == 'hello'
def test_args(working_path):
task = Task(DummyWorker(), 'echo', 'foobar').start()
while not task.done:
status = task.status()
assert status['result'] == 'foobar'
def test_kwargs(working_path):
task = Task(DummyWorker(), 'echo', message='foobar').start()
while not task.done:
status = task.status()
assert status['result'] == 'foobar'
def test_exception(working_path):
task = Task(DummyWorker(), 'echo', fail=True).start()
while not task.done:
status = task.status()
assert status['success'] == False
assert status['exception'].args[0] == 'failed'
def test_stop_running(working_path):
task = Task(DummyWorker(), 'echo', delay=5).start()
sleep(.5)
assert task.done == False
task.stop()
assert task.done == True
def test_stop_not_running(working_path):
task = Task(DummyWorker(), 'echo').start()
while not task.done:
status = task.status()
task.stop()
assert task.done == True
|
normal
|
{
"blob_id": "d2e46944ab05c5e8c1979101728b7b25900be342",
"index": 415,
"step-1": "<mask token>\n\n\nclass DummyWorker:\n\n def echo(self, message='hello', delay=0, fail=False):\n sleep(delay)\n if fail:\n raise Exception('failed')\n self.message = message\n return self.message\n\n\ndef test_default(working_path):\n task = Task(DummyWorker(), 'echo').start()\n while not task.done:\n status = task.status()\n assert status['result'] == 'hello'\n assert status['instance'].message == 'hello'\n\n\n<mask token>\n\n\ndef test_stop_running(working_path):\n task = Task(DummyWorker(), 'echo', delay=5).start()\n sleep(0.5)\n assert task.done == False\n task.stop()\n assert task.done == True\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass DummyWorker:\n\n def echo(self, message='hello', delay=0, fail=False):\n sleep(delay)\n if fail:\n raise Exception('failed')\n self.message = message\n return self.message\n\n\ndef test_default(working_path):\n task = Task(DummyWorker(), 'echo').start()\n while not task.done:\n status = task.status()\n assert status['result'] == 'hello'\n assert status['instance'].message == 'hello'\n\n\n<mask token>\n\n\ndef test_exception(working_path):\n task = Task(DummyWorker(), 'echo', fail=True).start()\n while not task.done:\n status = task.status()\n assert status['success'] == False\n assert status['exception'].args[0] == 'failed'\n\n\ndef test_stop_running(working_path):\n task = Task(DummyWorker(), 'echo', delay=5).start()\n sleep(0.5)\n assert task.done == False\n task.stop()\n assert task.done == True\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass DummyWorker:\n\n def echo(self, message='hello', delay=0, fail=False):\n sleep(delay)\n if fail:\n raise Exception('failed')\n self.message = message\n return self.message\n\n\ndef test_default(working_path):\n task = Task(DummyWorker(), 'echo').start()\n while not task.done:\n status = task.status()\n assert status['result'] == 'hello'\n assert status['instance'].message == 'hello'\n\n\n<mask token>\n\n\ndef test_kwargs(working_path):\n task = Task(DummyWorker(), 'echo', message='foobar').start()\n while not task.done:\n status = task.status()\n assert status['result'] == 'foobar'\n\n\ndef test_exception(working_path):\n task = Task(DummyWorker(), 'echo', fail=True).start()\n while not task.done:\n status = task.status()\n assert status['success'] == False\n assert status['exception'].args[0] == 'failed'\n\n\ndef test_stop_running(working_path):\n task = Task(DummyWorker(), 'echo', delay=5).start()\n sleep(0.5)\n assert task.done == False\n task.stop()\n assert task.done == True\n\n\ndef test_stop_not_running(working_path):\n task = Task(DummyWorker(), 'echo').start()\n while not task.done:\n status = task.status()\n task.stop()\n assert task.done == True\n",
"step-4": "import pytest\nfrom time import sleep\nfrom timeflux.helpers.background import Task\n\n\nclass DummyWorker:\n\n def echo(self, message='hello', delay=0, fail=False):\n sleep(delay)\n if fail:\n raise Exception('failed')\n self.message = message\n return self.message\n\n\ndef test_default(working_path):\n task = Task(DummyWorker(), 'echo').start()\n while not task.done:\n status = task.status()\n assert status['result'] == 'hello'\n assert status['instance'].message == 'hello'\n\n\ndef test_args(working_path):\n task = Task(DummyWorker(), 'echo', 'foobar').start()\n while not task.done:\n status = task.status()\n assert status['result'] == 'foobar'\n\n\ndef test_kwargs(working_path):\n task = Task(DummyWorker(), 'echo', message='foobar').start()\n while not task.done:\n status = task.status()\n assert status['result'] == 'foobar'\n\n\ndef test_exception(working_path):\n task = Task(DummyWorker(), 'echo', fail=True).start()\n while not task.done:\n status = task.status()\n assert status['success'] == False\n assert status['exception'].args[0] == 'failed'\n\n\ndef test_stop_running(working_path):\n task = Task(DummyWorker(), 'echo', delay=5).start()\n sleep(0.5)\n assert task.done == False\n task.stop()\n assert task.done == True\n\n\ndef test_stop_not_running(working_path):\n task = Task(DummyWorker(), 'echo').start()\n while not task.done:\n status = task.status()\n task.stop()\n assert task.done == True\n",
"step-5": "import pytest\nfrom time import sleep\nfrom timeflux.helpers.background import Task\n\nclass DummyWorker():\n def echo(self, message='hello', delay=0, fail=False):\n sleep(delay)\n if fail: raise Exception('failed')\n self.message = message\n return(self.message)\n\ndef test_default(working_path):\n task = Task(DummyWorker(), 'echo').start()\n while not task.done:\n status = task.status()\n assert status['result'] == 'hello'\n assert status['instance'].message == 'hello'\n\ndef test_args(working_path):\n task = Task(DummyWorker(), 'echo', 'foobar').start()\n while not task.done:\n status = task.status()\n assert status['result'] == 'foobar'\n\ndef test_kwargs(working_path):\n task = Task(DummyWorker(), 'echo', message='foobar').start()\n while not task.done:\n status = task.status()\n assert status['result'] == 'foobar'\n\ndef test_exception(working_path):\n task = Task(DummyWorker(), 'echo', fail=True).start()\n while not task.done:\n status = task.status()\n assert status['success'] == False\n assert status['exception'].args[0] == 'failed'\n\ndef test_stop_running(working_path):\n task = Task(DummyWorker(), 'echo', delay=5).start()\n sleep(.5)\n assert task.done == False\n task.stop()\n assert task.done == True\n\ndef test_stop_not_running(working_path):\n task = Task(DummyWorker(), 'echo').start()\n while not task.done:\n status = task.status()\n task.stop()\n assert task.done == True\n",
"step-ids": [
4,
5,
7,
9,
10
]
}
|
[
4,
5,
7,
9,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
pygame.init()
fuente = pygame.font.Font(None, 36)
pantalla = pygame.display.set_mode([ANCHO, ALTO])
pantalla.fill(BLANCO)
General = pygame.sprite.Group()
Jugadores = pygame.sprite.Group()
Frutas = pygame.sprite.Group()
Frutas_all = pygame.sprite.Group()
Flechas = pygame.sprite.Group()
Frutas_Pod = pygame.sprite.Group()
jugador = Jugador()
final = Final()
inicial = Inicial()
General.add(inicial)
General.add(jugador)
General.add(final)
Jugadores.add(jugador)
cereza_st = False
pausa = False
nc = 0
ncp = 0
vida = 3
salud = 340
var = 4
p = 0
np = 0
reloj = pygame.time.Clock()
fin_juego = False
fin = False
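    # Main game loop: poll events, update and draw every sprite, track score and
    # health, and cap the frame rate with the clock.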
while not fin:
for event in pygame.event.get():
if event.type == pygame.QUIT:
fin = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RIGHT:
jugador.var_x = var
if event.key == pygame.K_LEFT:
jugador.var_x = -var
if event.key == pygame.K_SPACE:
jugador.var_x = 0
if event.key == pygame.K_UP:
proyectil = Flecha()
proyectil.rect.x = jugador.rect.x + 25
proyectil.rect.y = jugador.rect.y
Flechas.add(proyectil)
General.add(proyectil)
if event.key == pygame.K_p:
if pausa:
pausa = False
else:
pausa = True
if not fin_juego and not pausa:
pantalla.fill(BLANCO)
General.update()
General.draw(pantalla)
if salud < 20:
salud = 340
vida -= 1
pygame.draw.polygon(pantalla, AZUL, [[0, 395], [vida * 100, 395
], [vida * 100, 400], [0, 400]])
pygame.draw.polygon(pantalla, SALMON, [[10, 20], [10, salud], [
5, salud], [5, 20]])
points = fuente.render('Puntos= ' + str(p), True, NEGRO)
pantalla.blit(points, [10, 360])
pygame.display.flip()
lf_col = pygame.sprite.spritecollide(final, Frutas, True)
li_col = pygame.sprite.spritecollide(inicial, Flechas, True)
lff_col = pygame.sprite.groupcollide(Flechas, Frutas, True, True)
lffp_col = pygame.sprite.groupcollide(Flechas, Frutas_Pod, True,
True)
lj_col = pygame.sprite.spritecollide(jugador, Frutas, True)
ljp_col = pygame.sprite.spritecollide(jugador, Frutas_Pod, True)
if nc == 5:
cereza_pod = Cereza_podrida()
Frutas_Pod.add(cereza_pod)
General.add(cereza_pod)
nc = 0
if np >= 10 and vida < 6:
vida += 1
np = 0
if p >= 15 and p < 30:
var = 5
for element in Frutas:
element.var_y = 3
for element in Frutas_Pod:
element.var_y = 3
if p >= 30:
var = 7
for element in Frutas:
element.var_y = 5
for element in Frutas_Pod:
element.var_y = 5
if not cereza_st:
cereza = Cereza()
Frutas.add(cereza)
General.add(cereza)
nc += 1
cereza_st = True
for element in lffp_col:
p += 2
np += 2
if salud + 50 > 340:
salud = 340
else:
salud += 50
for element in lff_col:
p -= 1
np -= 1
cereza_st = False
for element in lf_col:
p -= 1
np -= 1
cereza_st = False
for element in lj_col:
p += 1
np += 1
if salud + 50 >= 340:
salud = 340
else:
salud += 50
cereza_st = False
for element in ljp_col:
vida -= 1
if vida == 0:
fin_juego = True
salud -= 0.3
reloj.tick(60)
else:
if pausa and not fin_juego:
texto = fuente.render('Pausa', True, NEGRO)
pantalla.blit(texto, [330, 280])
pygame.display.flip()
if fin_juego:
texto = fuente.render('Fin del juego', True, BLANCO)
pantalla.fill(NEGRO)
pantalla.blit(texto, [330, 280])
pygame.display.flip()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ALTO = 400
ANCHO = 600
ROJO = 255, 0, 0
SALMON = 240, 99, 99
BLANCO = 255, 255, 255
NEGRO = 0, 0, 0
AZUL = 59, 131, 189
VERDE = 0, 255, 0
if __name__ == '__main__':
pygame.init()
fuente = pygame.font.Font(None, 36)
pantalla = pygame.display.set_mode([ANCHO, ALTO])
pantalla.fill(BLANCO)
General = pygame.sprite.Group()
Jugadores = pygame.sprite.Group()
Frutas = pygame.sprite.Group()
Frutas_all = pygame.sprite.Group()
Flechas = pygame.sprite.Group()
Frutas_Pod = pygame.sprite.Group()
jugador = Jugador()
final = Final()
inicial = Inicial()
General.add(inicial)
General.add(jugador)
General.add(final)
Jugadores.add(jugador)
cereza_st = False
pausa = False
nc = 0
ncp = 0
vida = 3
salud = 340
var = 4
p = 0
np = 0
reloj = pygame.time.Clock()
fin_juego = False
fin = False
while not fin:
for event in pygame.event.get():
if event.type == pygame.QUIT:
fin = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RIGHT:
jugador.var_x = var
if event.key == pygame.K_LEFT:
jugador.var_x = -var
if event.key == pygame.K_SPACE:
jugador.var_x = 0
if event.key == pygame.K_UP:
proyectil = Flecha()
proyectil.rect.x = jugador.rect.x + 25
proyectil.rect.y = jugador.rect.y
Flechas.add(proyectil)
General.add(proyectil)
if event.key == pygame.K_p:
if pausa:
pausa = False
else:
pausa = True
if not fin_juego and not pausa:
pantalla.fill(BLANCO)
General.update()
General.draw(pantalla)
if salud < 20:
salud = 340
vida -= 1
pygame.draw.polygon(pantalla, AZUL, [[0, 395], [vida * 100, 395
], [vida * 100, 400], [0, 400]])
pygame.draw.polygon(pantalla, SALMON, [[10, 20], [10, salud], [
5, salud], [5, 20]])
points = fuente.render('Puntos= ' + str(p), True, NEGRO)
pantalla.blit(points, [10, 360])
pygame.display.flip()
lf_col = pygame.sprite.spritecollide(final, Frutas, True)
li_col = pygame.sprite.spritecollide(inicial, Flechas, True)
lff_col = pygame.sprite.groupcollide(Flechas, Frutas, True, True)
lffp_col = pygame.sprite.groupcollide(Flechas, Frutas_Pod, True,
True)
lj_col = pygame.sprite.spritecollide(jugador, Frutas, True)
ljp_col = pygame.sprite.spritecollide(jugador, Frutas_Pod, True)
if nc == 5:
cereza_pod = Cereza_podrida()
Frutas_Pod.add(cereza_pod)
General.add(cereza_pod)
nc = 0
if np >= 10 and vida < 6:
vida += 1
np = 0
if p >= 15 and p < 30:
var = 5
for element in Frutas:
element.var_y = 3
for element in Frutas_Pod:
element.var_y = 3
if p >= 30:
var = 7
for element in Frutas:
element.var_y = 5
for element in Frutas_Pod:
element.var_y = 5
if not cereza_st:
cereza = Cereza()
Frutas.add(cereza)
General.add(cereza)
nc += 1
cereza_st = True
for element in lffp_col:
p += 2
np += 2
if salud + 50 > 340:
salud = 340
else:
salud += 50
for element in lff_col:
p -= 1
np -= 1
cereza_st = False
for element in lf_col:
p -= 1
np -= 1
cereza_st = False
for element in lj_col:
p += 1
np += 1
if salud + 50 >= 340:
salud = 340
else:
salud += 50
cereza_st = False
for element in ljp_col:
vida -= 1
if vida == 0:
fin_juego = True
salud -= 0.3
reloj.tick(60)
else:
if pausa and not fin_juego:
texto = fuente.render('Pausa', True, NEGRO)
pantalla.blit(texto, [330, 280])
pygame.display.flip()
if fin_juego:
texto = fuente.render('Fin del juego', True, BLANCO)
pantalla.fill(NEGRO)
pantalla.blit(texto, [330, 280])
pygame.display.flip()
<|reserved_special_token_1|>
import pygame
import random
from lb_juego import *
ALTO = 400
ANCHO = 600
ROJO = 255, 0, 0
SALMON = 240, 99, 99
BLANCO = 255, 255, 255
NEGRO = 0, 0, 0
AZUL = 59, 131, 189
VERDE = 0, 255, 0
if __name__ == '__main__':
pygame.init()
fuente = pygame.font.Font(None, 36)
pantalla = pygame.display.set_mode([ANCHO, ALTO])
pantalla.fill(BLANCO)
General = pygame.sprite.Group()
Jugadores = pygame.sprite.Group()
Frutas = pygame.sprite.Group()
Frutas_all = pygame.sprite.Group()
Flechas = pygame.sprite.Group()
Frutas_Pod = pygame.sprite.Group()
jugador = Jugador()
final = Final()
inicial = Inicial()
General.add(inicial)
General.add(jugador)
General.add(final)
Jugadores.add(jugador)
cereza_st = False
pausa = False
nc = 0
ncp = 0
vida = 3
salud = 340
var = 4
p = 0
np = 0
reloj = pygame.time.Clock()
fin_juego = False
fin = False
while not fin:
for event in pygame.event.get():
if event.type == pygame.QUIT:
fin = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RIGHT:
jugador.var_x = var
if event.key == pygame.K_LEFT:
jugador.var_x = -var
if event.key == pygame.K_SPACE:
jugador.var_x = 0
if event.key == pygame.K_UP:
proyectil = Flecha()
proyectil.rect.x = jugador.rect.x + 25
proyectil.rect.y = jugador.rect.y
Flechas.add(proyectil)
General.add(proyectil)
if event.key == pygame.K_p:
if pausa:
pausa = False
else:
pausa = True
if not fin_juego and not pausa:
pantalla.fill(BLANCO)
General.update()
General.draw(pantalla)
if salud < 20:
salud = 340
vida -= 1
pygame.draw.polygon(pantalla, AZUL, [[0, 395], [vida * 100, 395
], [vida * 100, 400], [0, 400]])
pygame.draw.polygon(pantalla, SALMON, [[10, 20], [10, salud], [
5, salud], [5, 20]])
points = fuente.render('Puntos= ' + str(p), True, NEGRO)
pantalla.blit(points, [10, 360])
pygame.display.flip()
lf_col = pygame.sprite.spritecollide(final, Frutas, True)
li_col = pygame.sprite.spritecollide(inicial, Flechas, True)
lff_col = pygame.sprite.groupcollide(Flechas, Frutas, True, True)
lffp_col = pygame.sprite.groupcollide(Flechas, Frutas_Pod, True,
True)
lj_col = pygame.sprite.spritecollide(jugador, Frutas, True)
ljp_col = pygame.sprite.spritecollide(jugador, Frutas_Pod, True)
if nc == 5:
cereza_pod = Cereza_podrida()
Frutas_Pod.add(cereza_pod)
General.add(cereza_pod)
nc = 0
if np >= 10 and vida < 6:
vida += 1
np = 0
if p >= 15 and p < 30:
var = 5
for element in Frutas:
element.var_y = 3
for element in Frutas_Pod:
element.var_y = 3
if p >= 30:
var = 7
for element in Frutas:
element.var_y = 5
for element in Frutas_Pod:
element.var_y = 5
if not cereza_st:
cereza = Cereza()
Frutas.add(cereza)
General.add(cereza)
nc += 1
cereza_st = True
for element in lffp_col:
p += 2
np += 2
if salud + 50 > 340:
salud = 340
else:
salud += 50
for element in lff_col:
p -= 1
np -= 1
cereza_st = False
for element in lf_col:
p -= 1
np -= 1
cereza_st = False
for element in lj_col:
p += 1
np += 1
if salud + 50 >= 340:
salud = 340
else:
salud += 50
cereza_st = False
for element in ljp_col:
vida -= 1
if vida == 0:
fin_juego = True
salud -= 0.3
reloj.tick(60)
else:
if pausa and not fin_juego:
texto = fuente.render('Pausa', True, NEGRO)
pantalla.blit(texto, [330, 280])
pygame.display.flip()
if fin_juego:
texto = fuente.render('Fin del juego', True, BLANCO)
pantalla.fill(NEGRO)
pantalla.blit(texto, [330, 280])
pygame.display.flip()
<|reserved_special_token_1|>
import pygame
import random
from lb_juego import*
#Screen dimensions
ALTO=400
ANCHO=600
#List of basic colors
ROJO=(255,0,0)
SALMON=(240,99,99)
BLANCO=(255,255,255)
NEGRO=(0,0,0)
AZUL=(59,131,189)
VERDE=(0,255,0)
if __name__=='__main__':
    #Initialize the pygame application
pygame.init()
    #Declare the font type
fuente=pygame.font.Font(None,36)
    #Declare the screen and its characteristics
pantalla=pygame.display.set_mode([ANCHO,ALTO])
pantalla.fill(BLANCO)
    #Declare the sprite groups we will use
General=pygame.sprite.Group()
Jugadores=pygame.sprite.Group()
Frutas=pygame.sprite.Group()
Frutas_all=pygame.sprite.Group()
Flechas=pygame.sprite.Group()
Frutas_Pod=pygame.sprite.Group()
    #Global objects that will be used
jugador=Jugador()
final=Final()
inicial=Inicial()
General.add(inicial)
General.add(jugador)
General.add(final)
Jugadores.add(jugador)
    #Declare the local variables
cereza_st=False
pausa=False
nc=0
ncp=0
vida=3
salud=340
var=4
p=0
np=0
reloj=pygame.time.Clock()
fin_juego=False
fin=False
    #Main loop of the program
while not fin:
for event in pygame.event.get():
if event.type==pygame.QUIT:
fin=True
            #Detect keyboard events
if event.type==pygame.KEYDOWN:
if event.key==pygame.K_RIGHT:
jugador.var_x=var
if event.key==pygame.K_LEFT:
jugador.var_x=-var
if event.key==pygame.K_SPACE:
jugador.var_x=0
if event.key==pygame.K_UP:
proyectil=Flecha()
proyectil.rect.x=jugador.rect.x+25
proyectil.rect.y=jugador.rect.y
Flechas.add(proyectil)
General.add(proyectil)
if event.key==pygame.K_p:
if pausa:
pausa=False
else:
pausa=True
if not fin_juego and not pausa:
            #Draw every object on screen
pantalla.fill(BLANCO)
General.update()
General.draw(pantalla)
            #Manage the health and life bars
if salud < 20:
salud = 340
vida-=1
pygame.draw.polygon(pantalla,AZUL,[[0,395],[vida*100,395],[vida*100,400],[0,400]])
pygame.draw.polygon(pantalla,SALMON,[[10,20],[10,salud],[5,salud],[5,20]])
points=fuente.render("Puntos= "+str(p),True,NEGRO)
pantalla.blit(points,[10,360])
            #Update the screen
pygame.display.flip()
            #Declare the collision lists
lf_col=pygame.sprite.spritecollide(final,Frutas,True)
li_col=pygame.sprite.spritecollide(inicial,Flechas,True)
lff_col=pygame.sprite.groupcollide(Flechas,Frutas,True,True)
lffp_col=pygame.sprite.groupcollide(Flechas,Frutas_Pod,True,True)
lj_col=pygame.sprite.spritecollide(jugador,Frutas,True)
ljp_col=pygame.sprite.spritecollide(jugador,Frutas_Pod,True)
            #Manage the scores
if nc==5:
cereza_pod=Cereza_podrida()
Frutas_Pod.add(cereza_pod)
General.add(cereza_pod)
nc=0
            if np>=10 and vida < 6:
                vida+=1
                np=0
if p>=15 and p<30:
var=5
for element in Frutas:
element.var_y=3
for element in Frutas_Pod:
element.var_y=3
if p>=30:
var=7
for element in Frutas:
element.var_y=5
for element in Frutas_Pod:
element.var_y=5
            #Draw the cherries
if not cereza_st:
cereza=Cereza()
Frutas.add(cereza)
General.add(cereza)
nc+=1
cereza_st=True
            #Declare the actions performed for each collision list
for element in lffp_col:
p+=2
np+=2
if salud+50 > 340:
salud=340
else:
salud+=50
for element in lff_col:
p-=1
np-=1
cereza_st=False
for element in lf_col:
p-=1
np-=1
cereza_st=False
for element in lj_col:
p+=1
np+=1
if salud+50 >= 340:
salud=340
else:
salud+=50
cereza_st=False
for element in ljp_col:
vida-=1
            #Define the condition under which the game ends
if vida==0:
fin_juego=True
salud-=0.3
reloj.tick(60)
else:
            #Pause state
if pausa and not fin_juego:
texto=fuente.render("Pausa",True,NEGRO)
pantalla.blit(texto,[330,280])
pygame.display.flip()
            #Game-over state
if fin_juego:
texto=fuente.render("Fin del juego",True,BLANCO)
pantalla.fill(NEGRO)
pantalla.blit(texto,[330,280])
pygame.display.flip()
|
flexible
|
{
"blob_id": "85fc2fc0a404c20b1f0806412424192ea4a50a9b",
"index": 7085,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n pygame.init()\n fuente = pygame.font.Font(None, 36)\n pantalla = pygame.display.set_mode([ANCHO, ALTO])\n pantalla.fill(BLANCO)\n General = pygame.sprite.Group()\n Jugadores = pygame.sprite.Group()\n Frutas = pygame.sprite.Group()\n Frutas_all = pygame.sprite.Group()\n Flechas = pygame.sprite.Group()\n Frutas_Pod = pygame.sprite.Group()\n jugador = Jugador()\n final = Final()\n inicial = Inicial()\n General.add(inicial)\n General.add(jugador)\n General.add(final)\n Jugadores.add(jugador)\n cereza_st = False\n pausa = False\n nc = 0\n ncp = 0\n vida = 3\n salud = 340\n var = 4\n p = 0\n np = 0\n reloj = pygame.time.Clock()\n fin_juego = False\n fin = False\n while not fin:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n fin = True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RIGHT:\n jugador.var_x = var\n if event.key == pygame.K_LEFT:\n jugador.var_x = -var\n if event.key == pygame.K_SPACE:\n jugador.var_x = 0\n if event.key == pygame.K_UP:\n proyectil = Flecha()\n proyectil.rect.x = jugador.rect.x + 25\n proyectil.rect.y = jugador.rect.y\n Flechas.add(proyectil)\n General.add(proyectil)\n if event.key == pygame.K_p:\n if pausa:\n pausa = False\n else:\n pausa = True\n if not fin_juego and not pausa:\n pantalla.fill(BLANCO)\n General.update()\n General.draw(pantalla)\n if salud < 20:\n salud = 340\n vida -= 1\n pygame.draw.polygon(pantalla, AZUL, [[0, 395], [vida * 100, 395\n ], [vida * 100, 400], [0, 400]])\n pygame.draw.polygon(pantalla, SALMON, [[10, 20], [10, salud], [\n 5, salud], [5, 20]])\n points = fuente.render('Puntos= ' + str(p), True, NEGRO)\n pantalla.blit(points, [10, 360])\n pygame.display.flip()\n lf_col = pygame.sprite.spritecollide(final, Frutas, True)\n li_col = pygame.sprite.spritecollide(inicial, Flechas, True)\n lff_col = pygame.sprite.groupcollide(Flechas, Frutas, True, True)\n lffp_col = pygame.sprite.groupcollide(Flechas, Frutas_Pod, True,\n True)\n lj_col = pygame.sprite.spritecollide(jugador, Frutas, True)\n ljp_col = pygame.sprite.spritecollide(jugador, Frutas_Pod, True)\n if nc == 5:\n cereza_pod = Cereza_podrida()\n Frutas_Pod.add(cereza_pod)\n General.add(cereza_pod)\n nc = 0\n if np >= 10 and vida < 6:\n vida += 1\n np = 0\n if p >= 15 and p < 30:\n var = 5\n for element in Frutas:\n element.var_y = 3\n for element in Frutas_Pod:\n element.var_y = 3\n if p >= 30:\n var = 7\n for element in Frutas:\n element.var_y = 5\n for element in Frutas_Pod:\n element.var_y = 5\n if not cereza_st:\n cereza = Cereza()\n Frutas.add(cereza)\n General.add(cereza)\n nc += 1\n cereza_st = True\n for element in lffp_col:\n p += 2\n np += 2\n if salud + 50 > 340:\n salud = 340\n else:\n salud += 50\n for element in lff_col:\n p -= 1\n np -= 1\n cereza_st = False\n for element in lf_col:\n p -= 1\n np -= 1\n cereza_st = False\n for element in lj_col:\n p += 1\n np += 1\n if salud + 50 >= 340:\n salud = 340\n else:\n salud += 50\n cereza_st = False\n for element in ljp_col:\n vida -= 1\n if vida == 0:\n fin_juego = True\n salud -= 0.3\n reloj.tick(60)\n else:\n if pausa and not fin_juego:\n texto = fuente.render('Pausa', True, NEGRO)\n pantalla.blit(texto, [330, 280])\n pygame.display.flip()\n if fin_juego:\n texto = fuente.render('Fin del juego', True, BLANCO)\n pantalla.fill(NEGRO)\n pantalla.blit(texto, [330, 280])\n pygame.display.flip()\n",
"step-3": "<mask token>\nALTO = 400\nANCHO = 600\nROJO = 255, 0, 0\nSALMON = 240, 99, 99\nBLANCO = 255, 255, 255\nNEGRO = 0, 0, 0\nAZUL = 59, 131, 189\nVERDE = 0, 255, 0\nif __name__ == '__main__':\n pygame.init()\n fuente = pygame.font.Font(None, 36)\n pantalla = pygame.display.set_mode([ANCHO, ALTO])\n pantalla.fill(BLANCO)\n General = pygame.sprite.Group()\n Jugadores = pygame.sprite.Group()\n Frutas = pygame.sprite.Group()\n Frutas_all = pygame.sprite.Group()\n Flechas = pygame.sprite.Group()\n Frutas_Pod = pygame.sprite.Group()\n jugador = Jugador()\n final = Final()\n inicial = Inicial()\n General.add(inicial)\n General.add(jugador)\n General.add(final)\n Jugadores.add(jugador)\n cereza_st = False\n pausa = False\n nc = 0\n ncp = 0\n vida = 3\n salud = 340\n var = 4\n p = 0\n np = 0\n reloj = pygame.time.Clock()\n fin_juego = False\n fin = False\n while not fin:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n fin = True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RIGHT:\n jugador.var_x = var\n if event.key == pygame.K_LEFT:\n jugador.var_x = -var\n if event.key == pygame.K_SPACE:\n jugador.var_x = 0\n if event.key == pygame.K_UP:\n proyectil = Flecha()\n proyectil.rect.x = jugador.rect.x + 25\n proyectil.rect.y = jugador.rect.y\n Flechas.add(proyectil)\n General.add(proyectil)\n if event.key == pygame.K_p:\n if pausa:\n pausa = False\n else:\n pausa = True\n if not fin_juego and not pausa:\n pantalla.fill(BLANCO)\n General.update()\n General.draw(pantalla)\n if salud < 20:\n salud = 340\n vida -= 1\n pygame.draw.polygon(pantalla, AZUL, [[0, 395], [vida * 100, 395\n ], [vida * 100, 400], [0, 400]])\n pygame.draw.polygon(pantalla, SALMON, [[10, 20], [10, salud], [\n 5, salud], [5, 20]])\n points = fuente.render('Puntos= ' + str(p), True, NEGRO)\n pantalla.blit(points, [10, 360])\n pygame.display.flip()\n lf_col = pygame.sprite.spritecollide(final, Frutas, True)\n li_col = pygame.sprite.spritecollide(inicial, Flechas, True)\n lff_col = pygame.sprite.groupcollide(Flechas, Frutas, True, True)\n lffp_col = pygame.sprite.groupcollide(Flechas, Frutas_Pod, True,\n True)\n lj_col = pygame.sprite.spritecollide(jugador, Frutas, True)\n ljp_col = pygame.sprite.spritecollide(jugador, Frutas_Pod, True)\n if nc == 5:\n cereza_pod = Cereza_podrida()\n Frutas_Pod.add(cereza_pod)\n General.add(cereza_pod)\n nc = 0\n if np >= 10 and vida < 6:\n vida += 1\n np = 0\n if p >= 15 and p < 30:\n var = 5\n for element in Frutas:\n element.var_y = 3\n for element in Frutas_Pod:\n element.var_y = 3\n if p >= 30:\n var = 7\n for element in Frutas:\n element.var_y = 5\n for element in Frutas_Pod:\n element.var_y = 5\n if not cereza_st:\n cereza = Cereza()\n Frutas.add(cereza)\n General.add(cereza)\n nc += 1\n cereza_st = True\n for element in lffp_col:\n p += 2\n np += 2\n if salud + 50 > 340:\n salud = 340\n else:\n salud += 50\n for element in lff_col:\n p -= 1\n np -= 1\n cereza_st = False\n for element in lf_col:\n p -= 1\n np -= 1\n cereza_st = False\n for element in lj_col:\n p += 1\n np += 1\n if salud + 50 >= 340:\n salud = 340\n else:\n salud += 50\n cereza_st = False\n for element in ljp_col:\n vida -= 1\n if vida == 0:\n fin_juego = True\n salud -= 0.3\n reloj.tick(60)\n else:\n if pausa and not fin_juego:\n texto = fuente.render('Pausa', True, NEGRO)\n pantalla.blit(texto, [330, 280])\n pygame.display.flip()\n if fin_juego:\n texto = fuente.render('Fin del juego', True, BLANCO)\n pantalla.fill(NEGRO)\n pantalla.blit(texto, [330, 280])\n pygame.display.flip()\n",
"step-4": "import pygame\nimport random\nfrom lb_juego import *\nALTO = 400\nANCHO = 600\nROJO = 255, 0, 0\nSALMON = 240, 99, 99\nBLANCO = 255, 255, 255\nNEGRO = 0, 0, 0\nAZUL = 59, 131, 189\nVERDE = 0, 255, 0\nif __name__ == '__main__':\n pygame.init()\n fuente = pygame.font.Font(None, 36)\n pantalla = pygame.display.set_mode([ANCHO, ALTO])\n pantalla.fill(BLANCO)\n General = pygame.sprite.Group()\n Jugadores = pygame.sprite.Group()\n Frutas = pygame.sprite.Group()\n Frutas_all = pygame.sprite.Group()\n Flechas = pygame.sprite.Group()\n Frutas_Pod = pygame.sprite.Group()\n jugador = Jugador()\n final = Final()\n inicial = Inicial()\n General.add(inicial)\n General.add(jugador)\n General.add(final)\n Jugadores.add(jugador)\n cereza_st = False\n pausa = False\n nc = 0\n ncp = 0\n vida = 3\n salud = 340\n var = 4\n p = 0\n np = 0\n reloj = pygame.time.Clock()\n fin_juego = False\n fin = False\n while not fin:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n fin = True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RIGHT:\n jugador.var_x = var\n if event.key == pygame.K_LEFT:\n jugador.var_x = -var\n if event.key == pygame.K_SPACE:\n jugador.var_x = 0\n if event.key == pygame.K_UP:\n proyectil = Flecha()\n proyectil.rect.x = jugador.rect.x + 25\n proyectil.rect.y = jugador.rect.y\n Flechas.add(proyectil)\n General.add(proyectil)\n if event.key == pygame.K_p:\n if pausa:\n pausa = False\n else:\n pausa = True\n if not fin_juego and not pausa:\n pantalla.fill(BLANCO)\n General.update()\n General.draw(pantalla)\n if salud < 20:\n salud = 340\n vida -= 1\n pygame.draw.polygon(pantalla, AZUL, [[0, 395], [vida * 100, 395\n ], [vida * 100, 400], [0, 400]])\n pygame.draw.polygon(pantalla, SALMON, [[10, 20], [10, salud], [\n 5, salud], [5, 20]])\n points = fuente.render('Puntos= ' + str(p), True, NEGRO)\n pantalla.blit(points, [10, 360])\n pygame.display.flip()\n lf_col = pygame.sprite.spritecollide(final, Frutas, True)\n li_col = pygame.sprite.spritecollide(inicial, Flechas, True)\n lff_col = pygame.sprite.groupcollide(Flechas, Frutas, True, True)\n lffp_col = pygame.sprite.groupcollide(Flechas, Frutas_Pod, True,\n True)\n lj_col = pygame.sprite.spritecollide(jugador, Frutas, True)\n ljp_col = pygame.sprite.spritecollide(jugador, Frutas_Pod, True)\n if nc == 5:\n cereza_pod = Cereza_podrida()\n Frutas_Pod.add(cereza_pod)\n General.add(cereza_pod)\n nc = 0\n if np >= 10 and vida < 6:\n vida += 1\n np = 0\n if p >= 15 and p < 30:\n var = 5\n for element in Frutas:\n element.var_y = 3\n for element in Frutas_Pod:\n element.var_y = 3\n if p >= 30:\n var = 7\n for element in Frutas:\n element.var_y = 5\n for element in Frutas_Pod:\n element.var_y = 5\n if not cereza_st:\n cereza = Cereza()\n Frutas.add(cereza)\n General.add(cereza)\n nc += 1\n cereza_st = True\n for element in lffp_col:\n p += 2\n np += 2\n if salud + 50 > 340:\n salud = 340\n else:\n salud += 50\n for element in lff_col:\n p -= 1\n np -= 1\n cereza_st = False\n for element in lf_col:\n p -= 1\n np -= 1\n cereza_st = False\n for element in lj_col:\n p += 1\n np += 1\n if salud + 50 >= 340:\n salud = 340\n else:\n salud += 50\n cereza_st = False\n for element in ljp_col:\n vida -= 1\n if vida == 0:\n fin_juego = True\n salud -= 0.3\n reloj.tick(60)\n else:\n if pausa and not fin_juego:\n texto = fuente.render('Pausa', True, NEGRO)\n pantalla.blit(texto, [330, 280])\n pygame.display.flip()\n if fin_juego:\n texto = fuente.render('Fin del juego', True, BLANCO)\n pantalla.fill(NEGRO)\n pantalla.blit(texto, 
[330, 280])\n pygame.display.flip()\n",
"step-5": "import pygame\nimport random\nfrom lb_juego import*\n\n#Dimensiones de la pantalla\nALTO=400\nANCHO=600\n#lista de colores basicos\nROJO=(255,0,0)\nSALMON=(240,99,99)\nBLANCO=(255,255,255)\nNEGRO=(0,0,0)\nAZUL=(59,131,189)\nVERDE=(0,255,0)\n\nif __name__=='__main__':\n #Inicializacion de la aplicacion en pygame\n pygame.init()\n #Declaracion del tipo de fuente\n fuente=pygame.font.Font(None,36)\n #Declaracion de la pantalla y sus caracteristicas\n pantalla=pygame.display.set_mode([ANCHO,ALTO])\n pantalla.fill(BLANCO)\n #Declaracion de los grupos de sprites que usaremos\n General=pygame.sprite.Group()\n Jugadores=pygame.sprite.Group()\n Frutas=pygame.sprite.Group()\n Frutas_all=pygame.sprite.Group()\n Flechas=pygame.sprite.Group()\n Frutas_Pod=pygame.sprite.Group()\n #Objetos globales que seran usados\n jugador=Jugador()\n final=Final()\n inicial=Inicial()\n General.add(inicial)\n General.add(jugador)\n General.add(final)\n Jugadores.add(jugador)\n #Declaracion de las variables locales\n cereza_st=False\n pausa=False\n nc=0\n ncp=0\n vida=3\n salud=340\n var=4\n p=0\n np=0\n reloj=pygame.time.Clock()\n fin_juego=False\n fin=False\n #Ciclo en el que sera ejecutado el programa\n while not fin:\n for event in pygame.event.get():\n if event.type==pygame.QUIT:\n fin=True\n #Deteccion de los eventos de teclado\n if event.type==pygame.KEYDOWN:\n if event.key==pygame.K_RIGHT:\n jugador.var_x=var\n if event.key==pygame.K_LEFT:\n jugador.var_x=-var\n if event.key==pygame.K_SPACE:\n jugador.var_x=0\n if event.key==pygame.K_UP:\n proyectil=Flecha()\n proyectil.rect.x=jugador.rect.x+25\n proyectil.rect.y=jugador.rect.y\n Flechas.add(proyectil)\n General.add(proyectil)\n if event.key==pygame.K_p:\n if pausa:\n pausa=False\n else:\n pausa=True\n if not fin_juego and not pausa:\n #Dibujando cada objeto en pantalla\n pantalla.fill(BLANCO)\n General.update()\n General.draw(pantalla)\n #Control de las barras de salud y vida\n if salud < 20:\n salud = 340\n vida-=1\n pygame.draw.polygon(pantalla,AZUL,[[0,395],[vida*100,395],[vida*100,400],[0,400]])\n pygame.draw.polygon(pantalla,SALMON,[[10,20],[10,salud],[5,salud],[5,20]])\n points=fuente.render(\"Puntos= \"+str(p),True,NEGRO)\n pantalla.blit(points,[10,360])\n #Actualizacion de la pantalla\n pygame.display.flip()\n #Declaracion de las listas de colision\n lf_col=pygame.sprite.spritecollide(final,Frutas,True)\n li_col=pygame.sprite.spritecollide(inicial,Flechas,True)\n lff_col=pygame.sprite.groupcollide(Flechas,Frutas,True,True)\n lffp_col=pygame.sprite.groupcollide(Flechas,Frutas_Pod,True,True)\n lj_col=pygame.sprite.spritecollide(jugador,Frutas,True)\n ljp_col=pygame.sprite.spritecollide(jugador,Frutas_Pod,True)\n #Control de los puntajes\n if nc==5:\n cereza_pod=Cereza_podrida()\n Frutas_Pod.add(cereza_pod)\n General.add(cereza_pod)\n nc=0\n if np>=10 and vida < 6 :\n vida+=1;\n np=0\n if p>=15 and p<30:\n var=5\n for element in Frutas:\n element.var_y=3\n for element in Frutas_Pod:\n element.var_y=3\n if p>=30:\n var=7\n for element in Frutas:\n element.var_y=5\n for element in Frutas_Pod:\n element.var_y=5\n #Dibujando las cerezas\n if not cereza_st:\n cereza=Cereza()\n Frutas.add(cereza)\n General.add(cereza)\n nc+=1\n cereza_st=True\n #Declarando acciones que realizara cada lista de colision\n for element in lffp_col:\n p+=2\n np+=2\n if salud+50 > 340:\n salud=340\n else:\n salud+=50\n for element in lff_col:\n p-=1\n np-=1\n cereza_st=False\n for element in lf_col:\n p-=1\n np-=1\n cereza_st=False\n for element in lj_col:\n p+=1\n np+=1\n 
if salud+50 >= 340:\n salud=340\n else:\n salud+=50\n cereza_st=False\n for element in ljp_col:\n vida-=1\n #Definiendo el estado bajo el cual se acaba el juego\n if vida==0:\n fin_juego=True\n salud-=0.3\n reloj.tick(60)\n else:\n #Estado de pausa\n if pausa and not fin_juego:\n texto=fuente.render(\"Pausa\",True,NEGRO)\n pantalla.blit(texto,[330,280])\n pygame.display.flip()\n #Estado de fin de juego\n if fin_juego:\n texto=fuente.render(\"Fin del juego\",True,BLANCO)\n pantalla.fill(NEGRO)\n pantalla.blit(texto,[330,280])\n pygame.display.flip()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def getTeams(reign, uprising, hunters, fuel, mayhem, gladiators, charge,
outlaws, spark, spitfire, excelsior, eternal, fusion, dynasty, shock,
dragons, defiant, valiant, titans, justice):
teamList = discord.Embed(title='Overwatch League Teams', description=
'2021 Season\n' + '**' + reign + """ATL-Atlanta Reign**
""" + '**' +
uprising + 'BOS-Boston Uprising**\n' + '**' + hunters +
'CDH-Chengdu Hunters**\n' + '**' + fuel + """DAL-Dallas Fuel**
""" +
'**' + mayhem + 'FLA-Florida Mayhem**\n' + '**' + gladiators +
"""GLA-Los Angeles Gladiators**
""" + '**' + charge +
'GZC-Guangzhou Charge**\n' + '**' + outlaws +
'HOU-Houston Outlaws**\n' + '**' + spark +
"""HZS-Hangzhou Spark**
""" + '**' + spitfire +
'LDN-London Spitfire**\n' + '**' + excelsior +
'NYE-New York Excelsior**\n' + '**' + eternal +
"""PAR-Paris Eternal**
""" + '**' + fusion +
        'PHI-Philadelphia Fusion**\n' + '**' + dynasty +
'SEO-Seoul Dynasty**\n' + '**' + shock +
"""SFS-San Francisco Shock**
""" + '**' + dragons +
'SHD-Shanghai Dragons**\n' + '**' + defiant +
"""TOR-Toronto Defiant**
**""" + valiant +
'VAL-Los Angeles Valiant**\n' + '**' + titans +
'VAN-Vancouver Titans**\n' + '**' + justice +
'WAS-Washington Justice**', color=discord.Colour.gold(), timestamp=
datetime.datetime.utcnow())
return teamList
<|reserved_special_token_1|>
import datetime
import discord
def getTeams(reign, uprising, hunters, fuel, mayhem, gladiators, charge,
outlaws, spark, spitfire, excelsior, eternal, fusion, dynasty, shock,
dragons, defiant, valiant, titans, justice):
teamList = discord.Embed(title='Overwatch League Teams', description=
'2021 Season\n' + '**' + reign + """ATL-Atlanta Reign**
""" + '**' +
uprising + 'BOS-Boston Uprising**\n' + '**' + hunters +
'CDH-Chengdu Hunters**\n' + '**' + fuel + """DAL-Dallas Fuel**
""" +
'**' + mayhem + 'FLA-Florida Mayhem**\n' + '**' + gladiators +
"""GLA-Los Angeles Gladiators**
""" + '**' + charge +
'GZC-Guangzhou Charge**\n' + '**' + outlaws +
'HOU-Houston Outlaws**\n' + '**' + spark +
"""HZS-Hangzhou Spark**
""" + '**' + spitfire +
'LDN-London Spitfire**\n' + '**' + excelsior +
'NYE-New York Excelsior**\n' + '**' + eternal +
"""PAR-Paris Eternal**
""" + '**' + fusion +
        'PHI-Philadelphia Fusion**\n' + '**' + dynasty +
'SEO-Seoul Dynasty**\n' + '**' + shock +
"""SFS-San Francisco Shock**
""" + '**' + dragons +
'SHD-Shanghai Dragons**\n' + '**' + defiant +
"""TOR-Toronto Defiant**
**""" + valiant +
'VAL-Los Angeles Valiant**\n' + '**' + titans +
'VAN-Vancouver Titans**\n' + '**' + justice +
'WAS-Washington Justice**', color=discord.Colour.gold(), timestamp=
datetime.datetime.utcnow())
return teamList
<|reserved_special_token_1|>
import datetime
import discord
def getTeams(reign, uprising, hunters, fuel, mayhem, gladiators, charge, outlaws, spark,
spitfire, excelsior, eternal, fusion, dynasty, shock, dragons, defiant, valiant, titans,
justice):
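    # Builds a Discord embed listing the 20 Overwatch League teams for the 2021 season.
    # Each parameter (reign, uprising, ...) is a string prefix (e.g. a status marker or emoji)
    # rendered in front of that team's entry.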
teamList = discord.Embed(
title="Overwatch League Teams",
description="2021 Season\n"+
"**"+reign+"ATL-Atlanta Reign**\n"+
"**"+uprising+"BOS-Boston Uprising**\n"+
"**"+hunters+"CDH-Chengdu Hunters**\n"+
"**"+fuel+"DAL-Dallas Fuel**\n"+
"**"+mayhem+"FLA-Florida Mayhem**\n"+
"**"+gladiators+"GLA-Los Angeles Gladiators**\n"+
"**"+charge+"GZC-Guangzhou Charge**\n"+
"**"+outlaws+"HOU-Houston Outlaws**\n"+
"**"+spark+"HZS-Hangzhou Spark**\n"+
"**"+spitfire+"LDN-London Spitfire**\n"+
"**"+excelsior+"NYE-New York Excelsior**\n"+
"**"+eternal+"PAR-Paris Eternal**\n"+
"**"+fusion+"PHI-Philadelphia Fustion**\n"+
"**"+dynasty+"SEO-Seoul Dynasty**\n"+
"**"+shock+"SFS-San Francisco Shock**\n"+
"**"+dragons+"SHD-Shanghai Dragons**\n"+
"**"+defiant+"TOR-Toronto Defiant**\n"
"**"+valiant+"VAL-Los Angeles Valiant**\n"+
"**"+titans+"VAN-Vancouver Titans**\n"+
"**"+justice+"WAS-Washington Justice**",
color=discord.Colour.gold(),
timestamp=datetime.datetime.utcnow()
)
return teamList
|
flexible
|
{
"blob_id": "9a02e09cbfe2c9b6ebb9d20ba6cea639871f0838",
"index": 7647,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef getTeams(reign, uprising, hunters, fuel, mayhem, gladiators, charge,\n outlaws, spark, spitfire, excelsior, eternal, fusion, dynasty, shock,\n dragons, defiant, valiant, titans, justice):\n teamList = discord.Embed(title='Overwatch League Teams', description=\n '2021 Season\\n' + '**' + reign + \"\"\"ATL-Atlanta Reign**\n\"\"\" + '**' +\n uprising + 'BOS-Boston Uprising**\\n' + '**' + hunters +\n 'CDH-Chengdu Hunters**\\n' + '**' + fuel + \"\"\"DAL-Dallas Fuel**\n\"\"\" +\n '**' + mayhem + 'FLA-Florida Mayhem**\\n' + '**' + gladiators +\n \"\"\"GLA-Los Angeles Gladiators**\n\"\"\" + '**' + charge +\n 'GZC-Guangzhou Charge**\\n' + '**' + outlaws +\n 'HOU-Houston Outlaws**\\n' + '**' + spark +\n \"\"\"HZS-Hangzhou Spark**\n\"\"\" + '**' + spitfire +\n 'LDN-London Spitfire**\\n' + '**' + excelsior +\n 'NYE-New York Excelsior**\\n' + '**' + eternal +\n \"\"\"PAR-Paris Eternal**\n\"\"\" + '**' + fusion +\n 'PHI-Philadelphia Fustion**\\n' + '**' + dynasty +\n 'SEO-Seoul Dynasty**\\n' + '**' + shock +\n \"\"\"SFS-San Francisco Shock**\n\"\"\" + '**' + dragons +\n 'SHD-Shanghai Dragons**\\n' + '**' + defiant +\n \"\"\"TOR-Toronto Defiant**\n**\"\"\" + valiant +\n 'VAL-Los Angeles Valiant**\\n' + '**' + titans +\n 'VAN-Vancouver Titans**\\n' + '**' + justice +\n 'WAS-Washington Justice**', color=discord.Colour.gold(), timestamp=\n datetime.datetime.utcnow())\n return teamList\n",
"step-3": "import datetime\nimport discord\n\n\ndef getTeams(reign, uprising, hunters, fuel, mayhem, gladiators, charge,\n outlaws, spark, spitfire, excelsior, eternal, fusion, dynasty, shock,\n dragons, defiant, valiant, titans, justice):\n teamList = discord.Embed(title='Overwatch League Teams', description=\n '2021 Season\\n' + '**' + reign + \"\"\"ATL-Atlanta Reign**\n\"\"\" + '**' +\n uprising + 'BOS-Boston Uprising**\\n' + '**' + hunters +\n 'CDH-Chengdu Hunters**\\n' + '**' + fuel + \"\"\"DAL-Dallas Fuel**\n\"\"\" +\n '**' + mayhem + 'FLA-Florida Mayhem**\\n' + '**' + gladiators +\n \"\"\"GLA-Los Angeles Gladiators**\n\"\"\" + '**' + charge +\n 'GZC-Guangzhou Charge**\\n' + '**' + outlaws +\n 'HOU-Houston Outlaws**\\n' + '**' + spark +\n \"\"\"HZS-Hangzhou Spark**\n\"\"\" + '**' + spitfire +\n 'LDN-London Spitfire**\\n' + '**' + excelsior +\n 'NYE-New York Excelsior**\\n' + '**' + eternal +\n \"\"\"PAR-Paris Eternal**\n\"\"\" + '**' + fusion +\n 'PHI-Philadelphia Fustion**\\n' + '**' + dynasty +\n 'SEO-Seoul Dynasty**\\n' + '**' + shock +\n \"\"\"SFS-San Francisco Shock**\n\"\"\" + '**' + dragons +\n 'SHD-Shanghai Dragons**\\n' + '**' + defiant +\n \"\"\"TOR-Toronto Defiant**\n**\"\"\" + valiant +\n 'VAL-Los Angeles Valiant**\\n' + '**' + titans +\n 'VAN-Vancouver Titans**\\n' + '**' + justice +\n 'WAS-Washington Justice**', color=discord.Colour.gold(), timestamp=\n datetime.datetime.utcnow())\n return teamList\n",
"step-4": "import datetime\nimport discord\n\ndef getTeams(reign, uprising, hunters, fuel, mayhem, gladiators, charge, outlaws, spark,\nspitfire, excelsior, eternal, fusion, dynasty, shock, dragons, defiant, valiant, titans,\njustice) :\n teamList = discord.Embed(\n title=\"Overwatch League Teams\",\n description=\"2021 Season\\n\"+\n \"**\"+reign+\"ATL-Atlanta Reign**\\n\"+\n \"**\"+uprising+\"BOS-Boston Uprising**\\n\"+\n \"**\"+hunters+\"CDH-Chengdu Hunters**\\n\"+\n \"**\"+fuel+\"DAL-Dallas Fuel**\\n\"+\n \"**\"+mayhem+\"FLA-Florida Mayhem**\\n\"+\n \"**\"+gladiators+\"GLA-Los Angeles Gladiators**\\n\"+\n \"**\"+charge+\"GZC-Guangzhou Charge**\\n\"+\n \"**\"+outlaws+\"HOU-Houston Outlaws**\\n\"+\n \"**\"+spark+\"HZS-Hangzhou Spark**\\n\"+\n \"**\"+spitfire+\"LDN-London Spitfire**\\n\"+\n \"**\"+excelsior+\"NYE-New York Excelsior**\\n\"+\n \"**\"+eternal+\"PAR-Paris Eternal**\\n\"+\n \"**\"+fusion+\"PHI-Philadelphia Fustion**\\n\"+\n \"**\"+dynasty+\"SEO-Seoul Dynasty**\\n\"+\n \"**\"+shock+\"SFS-San Francisco Shock**\\n\"+\n \"**\"+dragons+\"SHD-Shanghai Dragons**\\n\"+\n \"**\"+defiant+\"TOR-Toronto Defiant**\\n\"\n \"**\"+valiant+\"VAL-Los Angeles Valiant**\\n\"+\n \"**\"+titans+\"VAN-Vancouver Titans**\\n\"+\n \"**\"+justice+\"WAS-Washington Justice**\",\n color=discord.Colour.gold(),\n timestamp=datetime.datetime.utcnow()\n )\n return teamList\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import tornado.web
import tornado.escape
from torcms.core.base_handler import BaseHandler
from owslib.csw import CatalogueServiceWeb
from owslib.fes import PropertyIsEqualTo, PropertyIsLike, BBox
class DirectorySearchHandler(BaseHandler):
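    """Tornado handler that searches a remote CSW (Catalogue Service for the Web) endpoint via OWSLib."""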
def initialize(self):
super(DirectorySearchHandler, self).initialize()
def get(self, url_str=''):
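        # Route GET requests: '' -> default listing, 'search/...' -> metadata search,
        # 'view/<id>/<isweb>' -> single record detail.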
url_arr = self.parse_url(url_str)
if len(url_str) > 0:
url_arr = url_str.split('/')
# if url_str == '':
# self.render('metadata/meta_index.html')
if url_str == '':
self.list('')
elif url_arr[0] == 'search':
if len(url_arr[0]) >= 3:
self.search(url_arr[1], url_arr[2], url_arr[3], url_arr[4])
else:
self.search(url_arr[1], url_arr[2], '', 10)
elif url_arr[0] == 'view':
self.ajax_get(url_arr[1], url_arr[2])
# def post(self, *args, **kwargs):
# post_data = self.get_request_arguments()
# keyword = post_data.get('keyw9', '')
# isweb = post_data.get('isweb', '1')
# ldrt = post_data.get('ldrt', '')
# maxrecords = post_data.get('maxrecords', 20)
#
# self.redirect('/directory_search/search/{0}/{1}/{2}/{3}'.format(keyword, isweb, ldrt, maxrecords))
# def search(self, keyw):
# # print('====' * 40)
# # print(post_data)
# url = 'http://meta.osgeo.cn/pycsw/csw.py?mode=sru&operation=searchRetrieve&query={0}
# &maximumRecords=5&startRecord=5&outputFormat=application/json'.format(
# keyw)
# r = requests.get(url)
# pprint.pprint(r.text)
# self.parseXML(r.text.encode(encoding='UTF-8'))
def list(self, keyw):
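        # Default listing: queries the catalogue for records whose title contains the
        # keyword (currently hard-coded to 'data') and renders the index page.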
# print('====' * 40)
# print(post_data)
keyw = 'data'
csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')
birds_query_like = PropertyIsLike('dc:title', '%{0}%'.format(keyw))
csw.getrecords2(constraints=[birds_query_like], maxrecords=20)
print('-' * 20)
print(csw.results)
for rec in csw.results:
print(rec)
# out_dic = {}
# for rec in csw.records:
# url = 'http://meta.osgeo.cn/pycsw/csw.py?mode=sru&operation=searchRetrieve&query={0}\
# maximumRecords=5&startRecord=5&outputFormat=application/json'.format(
# keyw)
# r = requests.get(url)
# pprint.pprint(r.text)
self.render('../torcms_dde/search/meta_index.html',
meta_results=csw.records,
userinfo=self.userinfo)
# self.parseXML(r.text.encode(encoding='UTF-8'))
def search(self, keyw, isweb, ldrt, max_num):
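        # keyw: search keyword; isweb: '1' searches titles only, otherwise full text with
        # distributed search; ldrt: optional comma-separated bounding-box coordinates
        # (reordered before building the BBox filter); max_num: records per page.
        # 'startnum' (from the request) selects the result page.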
# print('=' * 40)
# print(ldrt)
post_data = self.get_request_arguments()
startnum = post_data.get('startnum', 0)
        startposition = int(startnum) * int(max_num) + 1
print("," * 50)
print(startnum)
csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')
# birds_query_like = PropertyIsLike('dc:title', '%{0}%'.format(keyw))
if ldrt:
print('=' * 40)
print(type(ldrt))
print(ldrt)
print('=' * 40)
xx_ldrt = [float(x) for x in ldrt.split(',')]
xx_ldrt = [xx_ldrt[1], xx_ldrt[0], xx_ldrt[3], xx_ldrt[2]]
print(xx_ldrt)
bbox_query = BBox(xx_ldrt)
if isweb == '1':
# birds_query = PropertyIsLike('dc:title', '%{0}%'.format(keyw))
csw.getrecords2(constraints=[bbox_query], startposition=startposition,maxrecords=max_num)
else:
birds_query = PropertyIsLike('csw:AnyText', '%{0}%'.format(keyw))
csw.getrecords2(constraints=[birds_query, bbox_query], maxrecords=max_num, startposition=startposition,
distributedsearch=True,
hopcount=2)
else:
if isweb == '1':
birds_query = PropertyIsLike('dc:title', '%{0}%'.format(keyw))
csw.getrecords2(constraints=[birds_query], startposition=startposition,maxrecords=max_num)
else:
birds_query = PropertyIsLike('csw:AnyText', '%{0}%'.format(keyw))
csw.getrecords2(constraints=[birds_query], maxrecords=max_num, startposition=startposition, distributedsearch=True,
hopcount=2)
print('-' * 20)
print(isweb)
print(csw.results)
for rec in csw.records:
print(rec)
# out_dic = {}
# for rec in csw.records:
# url = 'http://meta.osgeo.cn/pycsw/csw.py?mode=sru&operation=searchRetrieve&query={0}&
# maximumRecords=5&startRecord=5&outputFormat=application/json'.format(
# keyw)
# r = requests.get(url)
# pprint.pprint(r.text)
self.render('../torcms_dde/search/show_result.html',
meta_results=csw.records,
userinfo=self.userinfo,
isweb=isweb,
startnum = startnum
)
# self.parseXML(r.text.encode(encoding='UTF-8'))
# def get_result(self, post_data):
# print('====' * 40)
# print(post_data)
# url = 'http://meta.osgeo.cn/pycsw/csw.py?mode=sru&operation=searchRetrieve&query={0}
# &maximumRecords=5&startRecord=5'.format(
# post_data['keyw'][0])
# r = requests.get(url)
# pprint.pprint(r.text)
# self.parseXML(r.text.encode(encoding='UTF-8'))
# # data = urllib.request.Request(url)
def ajax_get(self, uuid, isweb):
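        # Fetch a single metadata record by identifier; for non-web ('isweb' != '1') sources
        # fall back to a distributed full-text search on the identifier.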
print('=' * 20)
print(uuid)
# uuid = uuid.split(':')[-1]
csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')
# birds_query_like = PropertyIsLike('dc:title', '%{0}%'.format(keyw))
csw.getrecordbyid(id=[uuid])
print('-' * 20)
print(csw.getrecordbyid(id=[uuid]))
if isweb == '1':
rec = csw.records.get(uuid)
else:
birds_query = PropertyIsLike('csw:AnyText', uuid)
csw.getrecords2(constraints=[birds_query], maxrecords=20, startposition=0, distributedsearch=True,
hopcount=2)
print(csw.results)
for key in csw.records:
rec = csw.records[key]
out_dict = {
'title': '',
'uid': '',
'sizhi': '',
}
self.render('../torcms_dde/search/show_rec.html',
kws=out_dict,
# meta_rec=csw.records.get(uuid),
meta_rec=rec,
unescape=tornado.escape.xhtml_unescape,
userinfo=self.userinfo
)
# #
# def parseXML(self, data):
#
# tree = etree.fromstring(data)
# # root = tree.getroot()
# uu = tree.findall('zs:record', tree.nsmap)
#
# meta_arr = []
# for x in uu:
# meta_arr.append(MyXML(x))
# # print(x.element('ows:LowerCorner'))
# # uu = etree.SubElement(x, "LowerCorner")
# # for sub_ele in x.iter():
# # print(sub_ele.tag)
# # if 'title' == sub_ele.tag.split('}')[1]:
# # print(sub_ele.text)
# # if 'LowerCorner' == sub_ele.tag.split('}')[1]:
# # print(sub_ele.text)
#
# self.render('metadata/show_result.html',
# meta_arr=meta_arr)
class MyXML:
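    """Thin wrapper around an XML record element that extracts common CSW fields."""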
def __init__(self, in_ele):
self.element = in_ele
def uid(self):
for sub_ele in self.element.iter():
if 'identifier' == sub_ele.tag.split('}')[1]:
return sub_ele.text
def recordPosition(self):
for sub_ele in self.element.iter():
if 'recordPosition' == sub_ele.tag.split('}')[1]:
return sub_ele.text
def sizhi(self):
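        # Returns the bounding extent parsed from LowerCorner/UpperCorner as
        # [lower_x, upper_x, lower_y, upper_y].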
out_arr = [0, 0, 0, 0]
for sub_ele in self.element.iter():
if 'LowerCorner' == sub_ele.tag.split('}')[1]:
t1 = sub_ele.text.split(' ')
out_arr[0] = float(t1[0])
out_arr[2] = float(t1[1])
if 'UpperCorner' == sub_ele.tag.split('}')[1]:
t2 = sub_ele.text.split(' ')
out_arr[1] = float(t2[0])
out_arr[3] = float(t2[1])
return out_arr
def title(self):
for sub_ele in self.element.iter():
if 'title' == sub_ele.tag.split('}')[1]:
return sub_ele.text
|
normal
|
{
"blob_id": "72ce7c48c9d1a7bcdbaead12648d03970663a11e",
"index": 3227,
"step-1": "<mask token>\n\n\nclass DirectorySearchHandler(BaseHandler):\n\n def initialize(self):\n super(DirectorySearchHandler, self).initialize()\n <mask token>\n <mask token>\n <mask token>\n\n def ajax_get(self, uuid, isweb):\n print('=' * 20)\n print(uuid)\n csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')\n csw.getrecordbyid(id=[uuid])\n print('-' * 20)\n print(csw.getrecordbyid(id=[uuid]))\n if isweb == '1':\n rec = csw.records.get(uuid)\n else:\n birds_query = PropertyIsLike('csw:AnyText', uuid)\n csw.getrecords2(constraints=[birds_query], maxrecords=20,\n startposition=0, distributedsearch=True, hopcount=2)\n print(csw.results)\n for key in csw.records:\n rec = csw.records[key]\n out_dict = {'title': '', 'uid': '', 'sizhi': ''}\n self.render('../torcms_dde/search/show_rec.html', kws=out_dict,\n meta_rec=rec, unescape=tornado.escape.xhtml_unescape, userinfo=\n self.userinfo)\n\n\nclass MyXML:\n\n def __init__(self, in_ele):\n self.element = in_ele\n\n def uid(self):\n for sub_ele in self.element.iter():\n if 'identifier' == sub_ele.tag.split('}')[1]:\n return sub_ele.text\n\n def recordPosition(self):\n for sub_ele in self.element.iter():\n if 'recordPosition' == sub_ele.tag.split('}')[1]:\n return sub_ele.text\n\n def sizhi(self):\n out_arr = [0, 0, 0, 0]\n for sub_ele in self.element.iter():\n if 'LowerCorner' == sub_ele.tag.split('}')[1]:\n t1 = sub_ele.text.split(' ')\n out_arr[0] = float(t1[0])\n out_arr[2] = float(t1[1])\n if 'UpperCorner' == sub_ele.tag.split('}')[1]:\n t2 = sub_ele.text.split(' ')\n out_arr[1] = float(t2[0])\n out_arr[3] = float(t2[1])\n return out_arr\n\n def title(self):\n for sub_ele in self.element.iter():\n if 'title' == sub_ele.tag.split('}')[1]:\n return sub_ele.text\n",
"step-2": "<mask token>\n\n\nclass DirectorySearchHandler(BaseHandler):\n\n def initialize(self):\n super(DirectorySearchHandler, self).initialize()\n <mask token>\n <mask token>\n\n def search(self, keyw, isweb, ldrt, max_num):\n post_data = self.get_request_arguments()\n startnum = post_data.get('startnum', 0)\n startposition = int(startnum) * int(max_num) + 1\n print(',' * 50)\n print(startnum)\n csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')\n if ldrt:\n print('=' * 40)\n print(type(ldrt))\n print(ldrt)\n print('=' * 40)\n xx_ldrt = [float(x) for x in ldrt.split(',')]\n xx_ldrt = [xx_ldrt[1], xx_ldrt[0], xx_ldrt[3], xx_ldrt[2]]\n print(xx_ldrt)\n bbox_query = BBox(xx_ldrt)\n if isweb == '1':\n csw.getrecords2(constraints=[bbox_query], startposition=\n startposition, maxrecords=max_num)\n else:\n birds_query = PropertyIsLike('csw:AnyText', '%{0}%'.format(\n keyw))\n csw.getrecords2(constraints=[birds_query, bbox_query],\n maxrecords=max_num, startposition=startposition,\n distributedsearch=True, hopcount=2)\n elif isweb == '1':\n birds_query = PropertyIsLike('dc:title', '%{0}%'.format(keyw))\n csw.getrecords2(constraints=[birds_query], startposition=\n startposition, maxrecords=max_num)\n else:\n birds_query = PropertyIsLike('csw:AnyText', '%{0}%'.format(keyw))\n csw.getrecords2(constraints=[birds_query], maxrecords=max_num,\n startposition=startposition, distributedsearch=True, hopcount=2\n )\n print('-' * 20)\n print(isweb)\n print(csw.results)\n for rec in csw.records:\n print(rec)\n self.render('../torcms_dde/search/show_result.html', meta_results=\n csw.records, userinfo=self.userinfo, isweb=isweb, startnum=startnum\n )\n\n def ajax_get(self, uuid, isweb):\n print('=' * 20)\n print(uuid)\n csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')\n csw.getrecordbyid(id=[uuid])\n print('-' * 20)\n print(csw.getrecordbyid(id=[uuid]))\n if isweb == '1':\n rec = csw.records.get(uuid)\n else:\n birds_query = PropertyIsLike('csw:AnyText', uuid)\n csw.getrecords2(constraints=[birds_query], maxrecords=20,\n startposition=0, distributedsearch=True, hopcount=2)\n print(csw.results)\n for key in csw.records:\n rec = csw.records[key]\n out_dict = {'title': '', 'uid': '', 'sizhi': ''}\n self.render('../torcms_dde/search/show_rec.html', kws=out_dict,\n meta_rec=rec, unescape=tornado.escape.xhtml_unescape, userinfo=\n self.userinfo)\n\n\nclass MyXML:\n\n def __init__(self, in_ele):\n self.element = in_ele\n\n def uid(self):\n for sub_ele in self.element.iter():\n if 'identifier' == sub_ele.tag.split('}')[1]:\n return sub_ele.text\n\n def recordPosition(self):\n for sub_ele in self.element.iter():\n if 'recordPosition' == sub_ele.tag.split('}')[1]:\n return sub_ele.text\n\n def sizhi(self):\n out_arr = [0, 0, 0, 0]\n for sub_ele in self.element.iter():\n if 'LowerCorner' == sub_ele.tag.split('}')[1]:\n t1 = sub_ele.text.split(' ')\n out_arr[0] = float(t1[0])\n out_arr[2] = float(t1[1])\n if 'UpperCorner' == sub_ele.tag.split('}')[1]:\n t2 = sub_ele.text.split(' ')\n out_arr[1] = float(t2[0])\n out_arr[3] = float(t2[1])\n return out_arr\n\n def title(self):\n for sub_ele in self.element.iter():\n if 'title' == sub_ele.tag.split('}')[1]:\n return sub_ele.text\n",
"step-3": "<mask token>\n\n\nclass DirectorySearchHandler(BaseHandler):\n\n def initialize(self):\n super(DirectorySearchHandler, self).initialize()\n <mask token>\n\n def list(self, keyw):\n keyw = 'data'\n csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')\n birds_query_like = PropertyIsLike('dc:title', '%{0}%'.format(keyw))\n csw.getrecords2(constraints=[birds_query_like], maxrecords=20)\n print('-' * 20)\n print(csw.results)\n for rec in csw.results:\n print(rec)\n self.render('../torcms_dde/search/meta_index.html', meta_results=\n csw.records, userinfo=self.userinfo)\n\n def search(self, keyw, isweb, ldrt, max_num):\n post_data = self.get_request_arguments()\n startnum = post_data.get('startnum', 0)\n startposition = int(startnum) * int(max_num) + 1\n print(',' * 50)\n print(startnum)\n csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')\n if ldrt:\n print('=' * 40)\n print(type(ldrt))\n print(ldrt)\n print('=' * 40)\n xx_ldrt = [float(x) for x in ldrt.split(',')]\n xx_ldrt = [xx_ldrt[1], xx_ldrt[0], xx_ldrt[3], xx_ldrt[2]]\n print(xx_ldrt)\n bbox_query = BBox(xx_ldrt)\n if isweb == '1':\n csw.getrecords2(constraints=[bbox_query], startposition=\n startposition, maxrecords=max_num)\n else:\n birds_query = PropertyIsLike('csw:AnyText', '%{0}%'.format(\n keyw))\n csw.getrecords2(constraints=[birds_query, bbox_query],\n maxrecords=max_num, startposition=startposition,\n distributedsearch=True, hopcount=2)\n elif isweb == '1':\n birds_query = PropertyIsLike('dc:title', '%{0}%'.format(keyw))\n csw.getrecords2(constraints=[birds_query], startposition=\n startposition, maxrecords=max_num)\n else:\n birds_query = PropertyIsLike('csw:AnyText', '%{0}%'.format(keyw))\n csw.getrecords2(constraints=[birds_query], maxrecords=max_num,\n startposition=startposition, distributedsearch=True, hopcount=2\n )\n print('-' * 20)\n print(isweb)\n print(csw.results)\n for rec in csw.records:\n print(rec)\n self.render('../torcms_dde/search/show_result.html', meta_results=\n csw.records, userinfo=self.userinfo, isweb=isweb, startnum=startnum\n )\n\n def ajax_get(self, uuid, isweb):\n print('=' * 20)\n print(uuid)\n csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')\n csw.getrecordbyid(id=[uuid])\n print('-' * 20)\n print(csw.getrecordbyid(id=[uuid]))\n if isweb == '1':\n rec = csw.records.get(uuid)\n else:\n birds_query = PropertyIsLike('csw:AnyText', uuid)\n csw.getrecords2(constraints=[birds_query], maxrecords=20,\n startposition=0, distributedsearch=True, hopcount=2)\n print(csw.results)\n for key in csw.records:\n rec = csw.records[key]\n out_dict = {'title': '', 'uid': '', 'sizhi': ''}\n self.render('../torcms_dde/search/show_rec.html', kws=out_dict,\n meta_rec=rec, unescape=tornado.escape.xhtml_unescape, userinfo=\n self.userinfo)\n\n\nclass MyXML:\n\n def __init__(self, in_ele):\n self.element = in_ele\n\n def uid(self):\n for sub_ele in self.element.iter():\n if 'identifier' == sub_ele.tag.split('}')[1]:\n return sub_ele.text\n\n def recordPosition(self):\n for sub_ele in self.element.iter():\n if 'recordPosition' == sub_ele.tag.split('}')[1]:\n return sub_ele.text\n\n def sizhi(self):\n out_arr = [0, 0, 0, 0]\n for sub_ele in self.element.iter():\n if 'LowerCorner' == sub_ele.tag.split('}')[1]:\n t1 = sub_ele.text.split(' ')\n out_arr[0] = float(t1[0])\n out_arr[2] = float(t1[1])\n if 'UpperCorner' == sub_ele.tag.split('}')[1]:\n t2 = sub_ele.text.split(' ')\n out_arr[1] = float(t2[0])\n out_arr[3] = float(t2[1])\n return out_arr\n\n def title(self):\n for sub_ele in 
self.element.iter():\n if 'title' == sub_ele.tag.split('}')[1]:\n return sub_ele.text\n",
"step-4": "import tornado.web\nimport tornado.escape\nfrom torcms.core.base_handler import BaseHandler\nfrom owslib.csw import CatalogueServiceWeb\nfrom owslib.fes import PropertyIsEqualTo, PropertyIsLike, BBox\n\n\nclass DirectorySearchHandler(BaseHandler):\n\n def initialize(self):\n super(DirectorySearchHandler, self).initialize()\n\n def get(self, url_str=''):\n url_arr = self.parse_url(url_str)\n if len(url_str) > 0:\n url_arr = url_str.split('/')\n if url_str == '':\n self.list('')\n elif url_arr[0] == 'search':\n if len(url_arr[0]) >= 3:\n self.search(url_arr[1], url_arr[2], url_arr[3], url_arr[4])\n else:\n self.search(url_arr[1], url_arr[2], '', 10)\n elif url_arr[0] == 'view':\n self.ajax_get(url_arr[1], url_arr[2])\n\n def list(self, keyw):\n keyw = 'data'\n csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')\n birds_query_like = PropertyIsLike('dc:title', '%{0}%'.format(keyw))\n csw.getrecords2(constraints=[birds_query_like], maxrecords=20)\n print('-' * 20)\n print(csw.results)\n for rec in csw.results:\n print(rec)\n self.render('../torcms_dde/search/meta_index.html', meta_results=\n csw.records, userinfo=self.userinfo)\n\n def search(self, keyw, isweb, ldrt, max_num):\n post_data = self.get_request_arguments()\n startnum = post_data.get('startnum', 0)\n startposition = int(startnum) * int(max_num) + 1\n print(',' * 50)\n print(startnum)\n csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')\n if ldrt:\n print('=' * 40)\n print(type(ldrt))\n print(ldrt)\n print('=' * 40)\n xx_ldrt = [float(x) for x in ldrt.split(',')]\n xx_ldrt = [xx_ldrt[1], xx_ldrt[0], xx_ldrt[3], xx_ldrt[2]]\n print(xx_ldrt)\n bbox_query = BBox(xx_ldrt)\n if isweb == '1':\n csw.getrecords2(constraints=[bbox_query], startposition=\n startposition, maxrecords=max_num)\n else:\n birds_query = PropertyIsLike('csw:AnyText', '%{0}%'.format(\n keyw))\n csw.getrecords2(constraints=[birds_query, bbox_query],\n maxrecords=max_num, startposition=startposition,\n distributedsearch=True, hopcount=2)\n elif isweb == '1':\n birds_query = PropertyIsLike('dc:title', '%{0}%'.format(keyw))\n csw.getrecords2(constraints=[birds_query], startposition=\n startposition, maxrecords=max_num)\n else:\n birds_query = PropertyIsLike('csw:AnyText', '%{0}%'.format(keyw))\n csw.getrecords2(constraints=[birds_query], maxrecords=max_num,\n startposition=startposition, distributedsearch=True, hopcount=2\n )\n print('-' * 20)\n print(isweb)\n print(csw.results)\n for rec in csw.records:\n print(rec)\n self.render('../torcms_dde/search/show_result.html', meta_results=\n csw.records, userinfo=self.userinfo, isweb=isweb, startnum=startnum\n )\n\n def ajax_get(self, uuid, isweb):\n print('=' * 20)\n print(uuid)\n csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')\n csw.getrecordbyid(id=[uuid])\n print('-' * 20)\n print(csw.getrecordbyid(id=[uuid]))\n if isweb == '1':\n rec = csw.records.get(uuid)\n else:\n birds_query = PropertyIsLike('csw:AnyText', uuid)\n csw.getrecords2(constraints=[birds_query], maxrecords=20,\n startposition=0, distributedsearch=True, hopcount=2)\n print(csw.results)\n for key in csw.records:\n rec = csw.records[key]\n out_dict = {'title': '', 'uid': '', 'sizhi': ''}\n self.render('../torcms_dde/search/show_rec.html', kws=out_dict,\n meta_rec=rec, unescape=tornado.escape.xhtml_unescape, userinfo=\n self.userinfo)\n\n\nclass MyXML:\n\n def __init__(self, in_ele):\n self.element = in_ele\n\n def uid(self):\n for sub_ele in self.element.iter():\n if 'identifier' == sub_ele.tag.split('}')[1]:\n return 
sub_ele.text\n\n def recordPosition(self):\n for sub_ele in self.element.iter():\n if 'recordPosition' == sub_ele.tag.split('}')[1]:\n return sub_ele.text\n\n def sizhi(self):\n out_arr = [0, 0, 0, 0]\n for sub_ele in self.element.iter():\n if 'LowerCorner' == sub_ele.tag.split('}')[1]:\n t1 = sub_ele.text.split(' ')\n out_arr[0] = float(t1[0])\n out_arr[2] = float(t1[1])\n if 'UpperCorner' == sub_ele.tag.split('}')[1]:\n t2 = sub_ele.text.split(' ')\n out_arr[1] = float(t2[0])\n out_arr[3] = float(t2[1])\n return out_arr\n\n def title(self):\n for sub_ele in self.element.iter():\n if 'title' == sub_ele.tag.split('}')[1]:\n return sub_ele.text\n",
"step-5": "import tornado.web\nimport tornado.escape\nfrom torcms.core.base_handler import BaseHandler\nfrom owslib.csw import CatalogueServiceWeb\nfrom owslib.fes import PropertyIsEqualTo, PropertyIsLike, BBox\n\n\nclass DirectorySearchHandler(BaseHandler):\n def initialize(self):\n super(DirectorySearchHandler, self).initialize()\n\n def get(self, url_str=''):\n url_arr = self.parse_url(url_str)\n if len(url_str) > 0:\n url_arr = url_str.split('/')\n # if url_str == '':\n # self.render('metadata/meta_index.html')\n\n if url_str == '':\n self.list('')\n elif url_arr[0] == 'search':\n if len(url_arr[0]) >= 3:\n self.search(url_arr[1], url_arr[2], url_arr[3], url_arr[4])\n else:\n self.search(url_arr[1], url_arr[2], '', 10)\n\n elif url_arr[0] == 'view':\n self.ajax_get(url_arr[1], url_arr[2])\n\n # def post(self, *args, **kwargs):\n # post_data = self.get_request_arguments()\n # keyword = post_data.get('keyw9', '')\n # isweb = post_data.get('isweb', '1')\n # ldrt = post_data.get('ldrt', '')\n # maxrecords = post_data.get('maxrecords', 20)\n #\n # self.redirect('/directory_search/search/{0}/{1}/{2}/{3}'.format(keyword, isweb, ldrt, maxrecords))\n\n # def search(self, keyw):\n # # print('====' * 40)\n # # print(post_data)\n # url = 'http://meta.osgeo.cn/pycsw/csw.py?mode=sru&operation=searchRetrieve&query={0}\n # &maximumRecords=5&startRecord=5&outputFormat=application/json'.format(\n # keyw)\n # r = requests.get(url)\n # pprint.pprint(r.text)\n # self.parseXML(r.text.encode(encoding='UTF-8'))\n def list(self, keyw):\n # print('====' * 40)\n # print(post_data)\n keyw = 'data'\n csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')\n birds_query_like = PropertyIsLike('dc:title', '%{0}%'.format(keyw))\n csw.getrecords2(constraints=[birds_query_like], maxrecords=20)\n print('-' * 20)\n print(csw.results)\n\n for rec in csw.results:\n print(rec)\n\n # out_dic = {}\n # for rec in csw.records:\n # url = 'http://meta.osgeo.cn/pycsw/csw.py?mode=sru&operation=searchRetrieve&query={0}\\\n # maximumRecords=5&startRecord=5&outputFormat=application/json'.format(\n # keyw)\n # r = requests.get(url)\n # pprint.pprint(r.text)\n\n self.render('../torcms_dde/search/meta_index.html',\n meta_results=csw.records,\n userinfo=self.userinfo)\n\n # self.parseXML(r.text.encode(encoding='UTF-8'))\n\n def search(self, keyw, isweb, ldrt, max_num):\n # print('=' * 40)\n # print(ldrt)\n post_data = self.get_request_arguments()\n startnum = post_data.get('startnum', 0)\n\n startposition = int(startnum) * int(max_num) +1\n print(\",\" * 50)\n print(startnum)\n csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')\n # birds_query_like = PropertyIsLike('dc:title', '%{0}%'.format(keyw))\n\n\n if ldrt:\n print('=' * 40)\n print(type(ldrt))\n print(ldrt)\n print('=' * 40)\n\n xx_ldrt = [float(x) for x in ldrt.split(',')]\n\n xx_ldrt = [xx_ldrt[1], xx_ldrt[0], xx_ldrt[3], xx_ldrt[2]]\n\n print(xx_ldrt)\n\n bbox_query = BBox(xx_ldrt)\n if isweb == '1':\n\n # birds_query = PropertyIsLike('dc:title', '%{0}%'.format(keyw))\n csw.getrecords2(constraints=[bbox_query], startposition=startposition,maxrecords=max_num)\n\n else:\n\n birds_query = PropertyIsLike('csw:AnyText', '%{0}%'.format(keyw))\n csw.getrecords2(constraints=[birds_query, bbox_query], maxrecords=max_num, startposition=startposition,\n distributedsearch=True,\n hopcount=2)\n else:\n if isweb == '1':\n\n birds_query = PropertyIsLike('dc:title', '%{0}%'.format(keyw))\n csw.getrecords2(constraints=[birds_query], startposition=startposition,maxrecords=max_num)\n else:\n 
birds_query = PropertyIsLike('csw:AnyText', '%{0}%'.format(keyw))\n csw.getrecords2(constraints=[birds_query], maxrecords=max_num, startposition=startposition, distributedsearch=True,\n hopcount=2)\n print('-' * 20)\n print(isweb)\n print(csw.results)\n\n for rec in csw.records:\n print(rec)\n\n # out_dic = {}\n # for rec in csw.records:\n\n # url = 'http://meta.osgeo.cn/pycsw/csw.py?mode=sru&operation=searchRetrieve&query={0}&\n # maximumRecords=5&startRecord=5&outputFormat=application/json'.format(\n # keyw)\n # r = requests.get(url)\n # pprint.pprint(r.text)\n\n self.render('../torcms_dde/search/show_result.html',\n meta_results=csw.records,\n userinfo=self.userinfo,\n isweb=isweb,\n startnum = startnum\n )\n\n # self.parseXML(r.text.encode(encoding='UTF-8'))\n\n # def get_result(self, post_data):\n # print('====' * 40)\n # print(post_data)\n # url = 'http://meta.osgeo.cn/pycsw/csw.py?mode=sru&operation=searchRetrieve&query={0}\n # &maximumRecords=5&startRecord=5'.format(\n # post_data['keyw'][0])\n # r = requests.get(url)\n # pprint.pprint(r.text)\n # self.parseXML(r.text.encode(encoding='UTF-8'))\n # # data = urllib.request.Request(url)\n\n def ajax_get(self, uuid, isweb):\n print('=' * 20)\n print(uuid)\n # uuid = uuid.split(':')[-1]\n csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')\n # birds_query_like = PropertyIsLike('dc:title', '%{0}%'.format(keyw))\n\n csw.getrecordbyid(id=[uuid])\n print('-' * 20)\n print(csw.getrecordbyid(id=[uuid]))\n if isweb == '1':\n rec = csw.records.get(uuid)\n else:\n birds_query = PropertyIsLike('csw:AnyText', uuid)\n csw.getrecords2(constraints=[birds_query], maxrecords=20, startposition=0, distributedsearch=True,\n hopcount=2)\n print(csw.results)\n for key in csw.records:\n rec = csw.records[key]\n\n out_dict = {\n 'title': '',\n 'uid': '',\n 'sizhi': '',\n\n }\n\n self.render('../torcms_dde/search/show_rec.html',\n kws=out_dict,\n # meta_rec=csw.records.get(uuid),\n meta_rec=rec,\n unescape=tornado.escape.xhtml_unescape,\n userinfo=self.userinfo\n )\n\n # #\n # def parseXML(self, data):\n #\n # tree = etree.fromstring(data)\n # # root = tree.getroot()\n # uu = tree.findall('zs:record', tree.nsmap)\n #\n # meta_arr = []\n # for x in uu:\n # meta_arr.append(MyXML(x))\n # # print(x.element('ows:LowerCorner'))\n # # uu = etree.SubElement(x, \"LowerCorner\")\n # # for sub_ele in x.iter():\n # # print(sub_ele.tag)\n # # if 'title' == sub_ele.tag.split('}')[1]:\n # # print(sub_ele.text)\n # # if 'LowerCorner' == sub_ele.tag.split('}')[1]:\n # # print(sub_ele.text)\n #\n # self.render('metadata/show_result.html',\n # meta_arr=meta_arr)\n\n\nclass MyXML():\n def __init__(self, in_ele):\n self.element = in_ele\n\n def uid(self):\n for sub_ele in self.element.iter():\n if 'identifier' == sub_ele.tag.split('}')[1]:\n return sub_ele.text\n\n def recordPosition(self):\n for sub_ele in self.element.iter():\n if 'recordPosition' == sub_ele.tag.split('}')[1]:\n return sub_ele.text\n\n def sizhi(self):\n out_arr = [0, 0, 0, 0]\n for sub_ele in self.element.iter():\n if 'LowerCorner' == sub_ele.tag.split('}')[1]:\n t1 = sub_ele.text.split(' ')\n out_arr[0] = float(t1[0])\n out_arr[2] = float(t1[1])\n if 'UpperCorner' == sub_ele.tag.split('}')[1]:\n t2 = sub_ele.text.split(' ')\n out_arr[1] = float(t2[0])\n out_arr[3] = float(t2[1])\n return out_arr\n\n def title(self):\n for sub_ele in self.element.iter():\n if 'title' == sub_ele.tag.split('}')[1]:\n return sub_ele.text\n",
"step-ids": [
9,
10,
11,
13,
14
]
}
|
[
9,
10,
11,
13,
14
] |
from typing import List

import tensorflow as tf
from tensorflow.keras.layers import Dense

"""Possible agent network structures implemented as Tensorflow Modules"""


class QNetwork:
    """Create the neural network architecture for the DQN agent."""

    def __init__(
        self,
        state_dim: int,
        action_dim: int = 3,  # Default: agents can hold=0, buy=1, or sell=2.
        hidden_layer_sizes: List = [128, 256, 256, 128],
        activation: str = "relu",
    ):

        self._state_dim = state_dim
        self._action_dim = action_dim
        self._hidden_layer_sizes = hidden_layer_sizes
        self._activation = activation

        self._model = tf.keras.Sequential()
        self._model.add(
            Dense(
                units=self._hidden_layer_sizes[0],
                input_dim=self._state_dim,
                activation=self._activation,
            )
        )

        for i in range(2, len(self._hidden_layer_sizes)):
            self._model.add(
                Dense(self._hidden_layer_sizes[i], activation=self._activation)
            )

        self._model.add(Dense(self._action_dim, activation="linear"))

    def get_model(self) -> tf.keras.Model:
        return self._model
|
normal
|
{
"blob_id": "a3e655350fb5fe7999bea4a87fb62c7698fb63f1",
"index": 6663,
"step-1": "<mask token>\n\n\nclass QNetwork:\n <mask token>\n\n def __init__(self, state_dim: int, action_dim: int=3,\n hidden_layer_sizes: List=[128, 256, 256, 128], activation: str='relu'):\n self._state_dim = state_dim\n self._action_dim = action_dim\n self._hidden_layer_sizes = hidden_layer_sizes\n self._activation = activation\n self._model = tf.keras.Sequential()\n self._model.add(Dense(units=self._hidden_layer_sizes[0], input_dim=\n self._state_dim, activation=self._activation))\n for i in range(2, len(self._hidden_layer_sizes)):\n self._model.add(Dense(self._hidden_layer_sizes[i], activation=\n self._activation))\n self._model.add(Dense(self._action_dim, activation='linear'))\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass QNetwork:\n <mask token>\n\n def __init__(self, state_dim: int, action_dim: int=3,\n hidden_layer_sizes: List=[128, 256, 256, 128], activation: str='relu'):\n self._state_dim = state_dim\n self._action_dim = action_dim\n self._hidden_layer_sizes = hidden_layer_sizes\n self._activation = activation\n self._model = tf.keras.Sequential()\n self._model.add(Dense(units=self._hidden_layer_sizes[0], input_dim=\n self._state_dim, activation=self._activation))\n for i in range(2, len(self._hidden_layer_sizes)):\n self._model.add(Dense(self._hidden_layer_sizes[i], activation=\n self._activation))\n self._model.add(Dense(self._action_dim, activation='linear'))\n\n def get_model(self) ->tf.keras.Model:\n return self._model\n",
"step-3": "<mask token>\n\n\nclass QNetwork:\n \"\"\"Create the neural network architecture for the DQN agent.\"\"\"\n\n def __init__(self, state_dim: int, action_dim: int=3,\n hidden_layer_sizes: List=[128, 256, 256, 128], activation: str='relu'):\n self._state_dim = state_dim\n self._action_dim = action_dim\n self._hidden_layer_sizes = hidden_layer_sizes\n self._activation = activation\n self._model = tf.keras.Sequential()\n self._model.add(Dense(units=self._hidden_layer_sizes[0], input_dim=\n self._state_dim, activation=self._activation))\n for i in range(2, len(self._hidden_layer_sizes)):\n self._model.add(Dense(self._hidden_layer_sizes[i], activation=\n self._activation))\n self._model.add(Dense(self._action_dim, activation='linear'))\n\n def get_model(self) ->tf.keras.Model:\n return self._model\n",
"step-4": "from typing import List\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Dense\n<mask token>\n\n\nclass QNetwork:\n \"\"\"Create the neural network architecture for the DQN agent.\"\"\"\n\n def __init__(self, state_dim: int, action_dim: int=3,\n hidden_layer_sizes: List=[128, 256, 256, 128], activation: str='relu'):\n self._state_dim = state_dim\n self._action_dim = action_dim\n self._hidden_layer_sizes = hidden_layer_sizes\n self._activation = activation\n self._model = tf.keras.Sequential()\n self._model.add(Dense(units=self._hidden_layer_sizes[0], input_dim=\n self._state_dim, activation=self._activation))\n for i in range(2, len(self._hidden_layer_sizes)):\n self._model.add(Dense(self._hidden_layer_sizes[i], activation=\n self._activation))\n self._model.add(Dense(self._action_dim, activation='linear'))\n\n def get_model(self) ->tf.keras.Model:\n return self._model\n",
"step-5": "from typing import List\n\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Dense\n\n\"\"\"Possible agent network structures implemented as Tensorflow Modules\"\"\"\n\n\nclass QNetwork:\n \"\"\"Create the neural network architecture for the DQN agent.\"\"\"\n\n def __init__(\n self,\n state_dim: int,\n action_dim: int = 3, # Default: agents can hold=0, buy=1, or sell=2.\n hidden_layer_sizes: List = [128, 256, 256, 128],\n activation: str = \"relu\",\n ):\n\n self._state_dim = state_dim\n self._action_dim = action_dim\n self._hidden_layer_sizes = hidden_layer_sizes\n self._activation = activation\n\n self._model = tf.keras.Sequential()\n self._model.add(\n Dense(\n units=self._hidden_layer_sizes[0],\n input_dim=self._state_dim,\n activation=self._activation,\n )\n )\n\n for i in range(2, len(self._hidden_layer_sizes)):\n self._model.add(\n Dense(self._hidden_layer_sizes[i], activation=self._activation)\n )\n\n self._model.add(Dense(self._action_dim, activation=\"linear\"))\n\n def get_model(self) -> tf.keras.Model:\n return self._model\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from PIL import Image
from flask_restplus import Namespace, Resource
from werkzeug.datastructures import FileStorage
from core.models.depthinthewild import DepthInTheWild
from core.utils import serve_pil_image
api = Namespace('nyudepth', description='Models Trained on NYUDepth')
upload_parser = api.parser()
upload_parser.add_argument('image', location='files', type=FileStorage,
    required=True)


@api.route('/depthinthewild/transform')
@api.expect(upload_parser)
class DepthInTheWildDepthTransform(Resource):

    def post(self):
        args = upload_parser.parse_args()
        uploaded_file = args['image']
        image = Image.open(uploaded_file.stream)
        hourglass = DepthInTheWild()
        _, depth_map_img = hourglass.transform(image)
        return serve_pil_image(depth_map_img)


@api.route('/depthinthewild/transform_raw')
@api.expect(upload_parser)
class DepthInTheWildDepthTransformRaw(Resource):

    def post(self):
        args = upload_parser.parse_args()
        uploaded_file = args['image']
        image = Image.open(uploaded_file.stream)
        hourglass = DepthInTheWild()
        depth_map, _ = hourglass.transform(image)
        return dict(depth_map=depth_map)
|
normal
|
{
"blob_id": "acf409f2e56cd16b7dc07476b49b9c18675f7775",
"index": 5540,
"step-1": "<mask token>\n\n\n@api.route('/depthinthewild/transform')\n@api.expect(upload_parser)\nclass DepthInTheWildDepthTransform(Resource):\n    <mask token>\n\n\n@api.route('/depthinthewild/transform_raw')\n@api.expect(upload_parser)\nclass DepthInTheWildDepthTransformRaw(Resource):\n\n    def post(self):\n        args = upload_parser.parse_args()\n        uploaded_file = args['image']\n        image = Image.open(uploaded_file.stream)\n        hourglass = DepthInTheWild()\n        depth_map, _ = hourglass.transform(image)\n        return dict(depth_map=depth_map)\n",
"step-2": "<mask token>\nupload_parser.add_argument('image', location='files', type=FileStorage,\n    required=True)\n\n\n@api.route('/depthinthewild/transform')\n@api.expect(upload_parser)\nclass DepthInTheWildDepthTransform(Resource):\n\n    def post(self):\n        args = upload_parser.parse_args()\n        uploaded_file = args['image']\n        image = Image.open(uploaded_file.stream)\n        hourglass = DepthInTheWild()\n        _, depth_map_img = hourglass.transform(image)\n        return serve_pil_image(depth_map_img)\n\n\n@api.route('/depthinthewild/transform_raw')\n@api.expect(upload_parser)\nclass DepthInTheWildDepthTransformRaw(Resource):\n\n    def post(self):\n        args = upload_parser.parse_args()\n        uploaded_file = args['image']\n        image = Image.open(uploaded_file.stream)\n        hourglass = DepthInTheWild()\n        depth_map, _ = hourglass.transform(image)\n        return dict(depth_map=depth_map)\n",
"step-3": "<mask token>\napi = Namespace('nyudepth', description='Models Trained on NYUDepth')\nupload_parser = api.parser()\nupload_parser.add_argument('image', location='files', type=FileStorage,\n    required=True)\n\n\n@api.route('/depthinthewild/transform')\n@api.expect(upload_parser)\nclass DepthInTheWildDepthTransform(Resource):\n\n    def post(self):\n        args = upload_parser.parse_args()\n        uploaded_file = args['image']\n        image = Image.open(uploaded_file.stream)\n        hourglass = DepthInTheWild()\n        _, depth_map_img = hourglass.transform(image)\n        return serve_pil_image(depth_map_img)\n\n\n@api.route('/depthinthewild/transform_raw')\n@api.expect(upload_parser)\nclass DepthInTheWildDepthTransformRaw(Resource):\n\n    def post(self):\n        args = upload_parser.parse_args()\n        uploaded_file = args['image']\n        image = Image.open(uploaded_file.stream)\n        hourglass = DepthInTheWild()\n        depth_map, _ = hourglass.transform(image)\n        return dict(depth_map=depth_map)\n",
"step-4": "from PIL import Image\nfrom flask_restplus import Namespace, Resource\nfrom werkzeug.datastructures import FileStorage\nfrom core.models.depthinthewild import DepthInTheWild\nfrom core.utils import serve_pil_image\napi = Namespace('nyudepth', description='Models Trained on NYUDepth')\nupload_parser = api.parser()\nupload_parser.add_argument('image', location='files', type=FileStorage,\n    required=True)\n\n\n@api.route('/depthinthewild/transform')\n@api.expect(upload_parser)\nclass DepthInTheWildDepthTransform(Resource):\n\n    def post(self):\n        args = upload_parser.parse_args()\n        uploaded_file = args['image']\n        image = Image.open(uploaded_file.stream)\n        hourglass = DepthInTheWild()\n        _, depth_map_img = hourglass.transform(image)\n        return serve_pil_image(depth_map_img)\n\n\n@api.route('/depthinthewild/transform_raw')\n@api.expect(upload_parser)\nclass DepthInTheWildDepthTransformRaw(Resource):\n\n    def post(self):\n        args = upload_parser.parse_args()\n        uploaded_file = args['image']\n        image = Image.open(uploaded_file.stream)\n        hourglass = DepthInTheWild()\n        depth_map, _ = hourglass.transform(image)\n        return dict(depth_map=depth_map)\n",
"step-5": null,
"step-ids": [
3,
5,
6,
7
]
}
|
[
3,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def check_triads(trio, final_str):
list_occur_zero = [i for i in range(len(final_str)) if final_str.
startswith(trio + '0', i)]
list_occur_one = [i for i in range(len(final_str)) if final_str.
startswith(trio + '1', i)]
return [len(list_occur_zero), len(list_occur_one)]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def sum_string(string):
list_chars = [zerone for zerone in string if zerone in ['0', '1']]
return list_chars
def check_triads(trio, final_str):
list_occur_zero = [i for i in range(len(final_str)) if final_str.
startswith(trio + '0', i)]
list_occur_one = [i for i in range(len(final_str)) if final_str.
startswith(trio + '1', i)]
return [len(list_occur_zero), len(list_occur_one)]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def sum_string(string):
list_chars = [zerone for zerone in string if zerone in ['0', '1']]
return list_chars
def check_triads(trio, final_str):
list_occur_zero = [i for i in range(len(final_str)) if final_str.
startswith(trio + '0', i)]
list_occur_one = [i for i in range(len(final_str)) if final_str.
startswith(trio + '1', i)]
return [len(list_occur_zero), len(list_occur_one)]
<|reserved_special_token_0|>
while len(list_str) < 100:
print('Print a random string containing 0 or 1:')
number_str = input()
list_str.extend(sum_string(number_str))
if len(list_str) < 100:
print(
f'Current data length is {len(list_str)}, {100 - len(list_str)} symbols left'
)
print("""
Final data string:""")
<|reserved_special_token_0|>
print(f'{final_st}\n')
for tri in list_triads:
values = check_triads(tri, final_st)
print(f'{tri}: {values[0]},{values[1]}')
<|reserved_special_token_1|>
def sum_string(string):
list_chars = [zerone for zerone in string if zerone in ["0", "1"]]
return list_chars
def check_triads(trio, final_str):
list_occur_zero = [i for i in range(len(final_str)) if final_str.startswith(trio + '0', i)]
list_occur_one = [i for i in range(len(final_str)) if final_str.startswith(trio + '1', i)]
return [len(list_occur_zero), len(list_occur_one)]
number_str = ""
list_str = []
list_triads = ['000', '001', '010', '011', '100', '101', '110', '111']
while len(list_str) < 100:
print('Print a random string containing 0 or 1:')
number_str = input()
list_str.extend(sum_string(number_str))
if len(list_str) < 100:
print(f'Current data length is {len(list_str)}, {(100 - len(list_str))} symbols left')
print("\nFinal data string:")
final_st = ''.join(list_str)
print(f"{final_st}\n")
for tri in list_triads:
values = check_triads(tri, final_st)
print(f"{tri}: {values[0]},{values[1]}")
|
flexible
|
{
"blob_id": "29304bdbf93b0b1308025db1d35a92346c6dcbe0",
"index": 3799,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef check_triads(trio, final_str):\n list_occur_zero = [i for i in range(len(final_str)) if final_str.\n startswith(trio + '0', i)]\n list_occur_one = [i for i in range(len(final_str)) if final_str.\n startswith(trio + '1', i)]\n return [len(list_occur_zero), len(list_occur_one)]\n\n\n<mask token>\n",
"step-3": "def sum_string(string):\n list_chars = [zerone for zerone in string if zerone in ['0', '1']]\n return list_chars\n\n\ndef check_triads(trio, final_str):\n list_occur_zero = [i for i in range(len(final_str)) if final_str.\n startswith(trio + '0', i)]\n list_occur_one = [i for i in range(len(final_str)) if final_str.\n startswith(trio + '1', i)]\n return [len(list_occur_zero), len(list_occur_one)]\n\n\n<mask token>\n",
"step-4": "def sum_string(string):\n list_chars = [zerone for zerone in string if zerone in ['0', '1']]\n return list_chars\n\n\ndef check_triads(trio, final_str):\n list_occur_zero = [i for i in range(len(final_str)) if final_str.\n startswith(trio + '0', i)]\n list_occur_one = [i for i in range(len(final_str)) if final_str.\n startswith(trio + '1', i)]\n return [len(list_occur_zero), len(list_occur_one)]\n\n\n<mask token>\nwhile len(list_str) < 100:\n print('Print a random string containing 0 or 1:')\n number_str = input()\n list_str.extend(sum_string(number_str))\n if len(list_str) < 100:\n print(\n f'Current data length is {len(list_str)}, {100 - len(list_str)} symbols left'\n )\nprint(\"\"\"\nFinal data string:\"\"\")\n<mask token>\nprint(f'{final_st}\\n')\nfor tri in list_triads:\n values = check_triads(tri, final_st)\n print(f'{tri}: {values[0]},{values[1]}')\n",
"step-5": "def sum_string(string):\n list_chars = [zerone for zerone in string if zerone in [\"0\", \"1\"]]\n return list_chars\n\n\ndef check_triads(trio, final_str):\n list_occur_zero = [i for i in range(len(final_str)) if final_str.startswith(trio + '0', i)]\n list_occur_one = [i for i in range(len(final_str)) if final_str.startswith(trio + '1', i)]\n\n return [len(list_occur_zero), len(list_occur_one)]\n\n\nnumber_str = \"\"\nlist_str = []\nlist_triads = ['000', '001', '010', '011', '100', '101', '110', '111']\n\nwhile len(list_str) < 100:\n print('Print a random string containing 0 or 1:')\n number_str = input()\n list_str.extend(sum_string(number_str))\n\n if len(list_str) < 100:\n print(f'Current data length is {len(list_str)}, {(100 - len(list_str))} symbols left')\n\nprint(\"\\nFinal data string:\")\nfinal_st = ''.join(list_str)\nprint(f\"{final_st}\\n\")\n\nfor tri in list_triads:\n values = check_triads(tri, final_st)\n print(f\"{tri}: {values[0]},{values[1]}\")\n\n\n",
"step-ids": [
0,
1,
2,
3,
5
]
}
|
[
0,
1,
2,
3,
5
] |
import cv2 as cv
#! THESE ARE IMAGES THAT AREN'T DOWNSIZED
#original_image_1 = cv.imread("hamburger_face.JPG")
#original_image_2 = cv.imread("hammock_reading.JPG")
#original_image_3 = cv.imread("sofa_face.JPG")
#original_image_4 = cv.imread("frisbee_team.JPG")
original_image_5 = cv.imread("mans_face.JPG")
# TO PRINT OUT ARRAY AND DIMENSIONS
# print(original_image)
# print(original_image.shape)
#grayscale_image = cv.cvtColor(original_image_1, cv.COLOR_BGR2GRAY)
#grayscale_image = cv.cvtColor(original_image_2, cv.COLOR_BGR2GRAY)
#grayscale_image = cv.cvtColor(original_image_3, cv.COLOR_BGR2GRAY)
#grayscale_image = cv.cvtColor(original_image_4, cv.COLOR_BGR2GRAY)
grayscale_image = cv.cvtColor(original_image_5, cv.COLOR_BGR2GRAY)
# TO PRINT OUT GRAYSCALE IMG
#cv.imshow("gray_img", grayscale_image)
#cv.waitKey(0)
#cv.destroyAllWindows()
face_cascade = cv.CascadeClassifier('haar_cascade_front.xml')
detected_faces = face_cascade.detectMultiScale(grayscale_image)
# PRINTS COORDINATES OF FACES
#print(detected_faces)
for face in detected_faces:
x , y , w , h = face
cv.rectangle(original_image_5, (x, y), (x + w , y + h ), (0 , 255 , 0), 2)
cv.imshow("orig_img", original_image_5)
cv.waitKey(0)
cv.destroyAllWindows()
|
normal
|
{
"blob_id": "d0bd08bea65878f5fccfc4affecdf53cc36179df",
"index": 6633,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor face in detected_faces:\n x, y, w, h = face\n cv.rectangle(original_image_5, (x, y), (x + w, y + h), (0, 255, 0), 2)\ncv.imshow('orig_img', original_image_5)\ncv.waitKey(0)\ncv.destroyAllWindows()\n",
"step-3": "<mask token>\noriginal_image_5 = cv.imread('mans_face.JPG')\ngrayscale_image = cv.cvtColor(original_image_5, cv.COLOR_BGR2GRAY)\nface_cascade = cv.CascadeClassifier('haar_cascade_front.xml')\ndetected_faces = face_cascade.detectMultiScale(grayscale_image)\nfor face in detected_faces:\n x, y, w, h = face\n cv.rectangle(original_image_5, (x, y), (x + w, y + h), (0, 255, 0), 2)\ncv.imshow('orig_img', original_image_5)\ncv.waitKey(0)\ncv.destroyAllWindows()\n",
"step-4": "import cv2 as cv\noriginal_image_5 = cv.imread('mans_face.JPG')\ngrayscale_image = cv.cvtColor(original_image_5, cv.COLOR_BGR2GRAY)\nface_cascade = cv.CascadeClassifier('haar_cascade_front.xml')\ndetected_faces = face_cascade.detectMultiScale(grayscale_image)\nfor face in detected_faces:\n x, y, w, h = face\n cv.rectangle(original_image_5, (x, y), (x + w, y + h), (0, 255, 0), 2)\ncv.imshow('orig_img', original_image_5)\ncv.waitKey(0)\ncv.destroyAllWindows()\n",
"step-5": "import cv2 as cv\r\n\r\n#! THESE ARE IMAGES THAT AREN'T DOWNSIZED\r\n#original_image_1 = cv.imread(\"hamburger_face.JPG\")\r\n#original_image_2 = cv.imread(\"hammock_reading.JPG\")\r\n#original_image_3 = cv.imread(\"sofa_face.JPG\")\r\n#original_image_4 = cv.imread(\"frisbee_team.JPG\")\r\noriginal_image_5 = cv.imread(\"mans_face.JPG\")\r\n\r\n# TO PRINT OUT ARRAY AND DIMENSIONS\r\n# print(original_image)\r\n# print(original_image.shape)\r\n\r\n#grayscale_image = cv.cvtColor(original_image_1, cv.COLOR_BGR2GRAY)\r\n#grayscale_image = cv.cvtColor(original_image_2, cv.COLOR_BGR2GRAY)\r\n#grayscale_image = cv.cvtColor(original_image_3, cv.COLOR_BGR2GRAY)\r\n#grayscale_image = cv.cvtColor(original_image_4, cv.COLOR_BGR2GRAY)\r\ngrayscale_image = cv.cvtColor(original_image_5, cv.COLOR_BGR2GRAY)\r\n\r\n# TO PRINT OUT GRAYSCALE IMG\r\n#cv.imshow(\"gray_img\", grayscale_image)\r\n#cv.waitKey(0)\r\n#cv.destroyAllWindows()\r\n\r\nface_cascade = cv.CascadeClassifier('haar_cascade_front.xml')\r\ndetected_faces = face_cascade.detectMultiScale(grayscale_image)\r\n\r\n# PRINTS COORDINATES OF FACES\r\n#print(detected_faces)\r\n\r\nfor face in detected_faces:\r\n x , y , w , h = face\r\n cv.rectangle(original_image_5, (x, y), (x + w , y + h ), (0 , 255 , 0), 2)\r\n\r\ncv.imshow(\"orig_img\", original_image_5)\r\ncv.waitKey(0)\r\ncv.destroyAllWindows()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import json
import re
from bs4 import BeautifulSoup
from bs4.element import NavigableString, Tag
from common import dir_path
def is_element(el, tag):
return isinstance(el, Tag) and el.name == tag
class ElemIterator():
def __init__(self, els):
self.els = els
self.i = 0
def peek(self):
try:
return self.els[self.i]
except IndexError:
return None
def __next__(self):
self.i += 1
return self.els[self.i - 1]
def hasNext(self):
return len(self.els) > (self.i)
def peek_till(self, tag):
while not is_element(self.peek(), tag):
self.__next__()
def next_till(self, tag):
self.peek_till(tag)
self.__next__()
def parse_lines(iter_):
iter_.peek_till('strong')
county = []
while iter_.hasNext():
county += [iter_.__next__()]
if is_element(iter_.peek(), 'strong'):
yield ElemIterator(county)
county = []
yield ElemIterator(county)
county = []
def parse_emails_url(iter_):
emails = []
url = None
try:
while True:
iter_.peek_till('a')
email = iter_.__next__()
href = email['href']
if href.startswith('mailto:'):
if href[7:]:
emails += [href[7:]]
else:
emails += [email.text]
else:
url = href
except IndexError:
pass
return emails, url
def parse_url(iter_):
iter_.peek_till('a')
link = iter_.__next__()
href = link['href']
assert not href.startswith('mailto:')
return [href]
def parse_county(iter_):
county_title = iter_.__next__().text.strip().title()
locale = re.match('(.*) (City|County)', county_title).group(0)
if county_title.startswith('Clark County Elections Mailing Address'):
emails, url = parse_emails_url(iter_)
return {
'locale': locale,
'county': locale,
'emails': emails,
}
while True:
el = iter_.__next__()
if isinstance(el, NavigableString):
if 'Clerk' in el or 'Registrar' in el:
official = el.strip().split(',')[0]
break
address = []
while True:
el = iter_.__next__()
if isinstance(el, NavigableString):
address += [el.strip()]
if re.search(r'Nevada \d{5}', el) or re.search(r'NV \d{5}', el):
break
el = iter_.__next__()
el = iter_.__next__()
if isinstance(el, NavigableString):
el = el.replace(u'\xa0', ' ') # replace non-breaking space
matches1 = re.search(r'(\(\d{3}\) \d{3}-\d{4}) FAX (\(\d{3}\) \d{3}-\d{4})', el)
matches2 = re.search(r'(\(\d{3}\) \d{3}-VOTE \(\d{4}\)) FAX (\(\d{3}\) \d{3}-\d{4})', el)
if matches1:
phone = matches1.group(1)
fax = matches1.group(2)
elif matches2:
phone = matches2.group(1)
fax = matches2.group(2)
else:
print(county_title)
print(el)
print(re.search(r'(\(\d{3}\) \d{3}-\d{4}) FAX', el))
assert False
emails, url = parse_emails_url(iter_)
init = {'city': locale} if locale.endswith('City') else {'county': locale}
return {
**init,
'locale': locale,
'official': official,
'address': ', '.join(address),
'emails': list(set(emails)),
'phones': [phone],
'faxes': [fax],
'url': url,
}
def main():
# Actually this file: https://www.nvsos.gov/sos/elections/voters/county-clerk-contact-information
# But it's behind a javascript test
with open(dir_path(__file__) + '/cache/Nevada.htm') as fh:
page = fh.read()
soup = BeautifulSoup(page, 'lxml')
ps = soup.select('div.content_area > p')
iter_ = ElemIterator([x for p in ps for x in p.children])
raw_counties = [parse_county(county) for county in parse_lines(iter_)]
merge_counties = {}
for county in raw_counties:
locale = county['locale']
if locale in merge_counties:
merge_counties[locale]['emails'] += county['emails']
else:
merge_counties[locale] = county
counties = list(merge_counties.values())
assert len(counties) == len(raw_counties) - 1
with open('public/nevada.json', 'w') as fh:
json.dump(counties, fh)
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "cb08f64d1ad7e53f1041684d4ca4ef65036c138d",
"index": 44,
"step-1": "<mask token>\n\n\ndef is_element(el, tag):\n return isinstance(el, Tag) and el.name == tag\n\n\nclass ElemIterator:\n\n def __init__(self, els):\n self.els = els\n self.i = 0\n\n def peek(self):\n try:\n return self.els[self.i]\n except IndexError:\n return None\n\n def __next__(self):\n self.i += 1\n return self.els[self.i - 1]\n\n def hasNext(self):\n return len(self.els) > self.i\n\n def peek_till(self, tag):\n while not is_element(self.peek(), tag):\n self.__next__()\n\n def next_till(self, tag):\n self.peek_till(tag)\n self.__next__()\n\n\ndef parse_lines(iter_):\n iter_.peek_till('strong')\n county = []\n while iter_.hasNext():\n county += [iter_.__next__()]\n if is_element(iter_.peek(), 'strong'):\n yield ElemIterator(county)\n county = []\n yield ElemIterator(county)\n county = []\n\n\n<mask token>\n\n\ndef parse_url(iter_):\n iter_.peek_till('a')\n link = iter_.__next__()\n href = link['href']\n assert not href.startswith('mailto:')\n return [href]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef is_element(el, tag):\n return isinstance(el, Tag) and el.name == tag\n\n\nclass ElemIterator:\n\n def __init__(self, els):\n self.els = els\n self.i = 0\n\n def peek(self):\n try:\n return self.els[self.i]\n except IndexError:\n return None\n\n def __next__(self):\n self.i += 1\n return self.els[self.i - 1]\n\n def hasNext(self):\n return len(self.els) > self.i\n\n def peek_till(self, tag):\n while not is_element(self.peek(), tag):\n self.__next__()\n\n def next_till(self, tag):\n self.peek_till(tag)\n self.__next__()\n\n\ndef parse_lines(iter_):\n iter_.peek_till('strong')\n county = []\n while iter_.hasNext():\n county += [iter_.__next__()]\n if is_element(iter_.peek(), 'strong'):\n yield ElemIterator(county)\n county = []\n yield ElemIterator(county)\n county = []\n\n\n<mask token>\n\n\ndef parse_url(iter_):\n iter_.peek_till('a')\n link = iter_.__next__()\n href = link['href']\n assert not href.startswith('mailto:')\n return [href]\n\n\ndef parse_county(iter_):\n county_title = iter_.__next__().text.strip().title()\n locale = re.match('(.*) (City|County)', county_title).group(0)\n if county_title.startswith('Clark County Elections Mailing Address'):\n emails, url = parse_emails_url(iter_)\n return {'locale': locale, 'county': locale, 'emails': emails}\n while True:\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n if 'Clerk' in el or 'Registrar' in el:\n official = el.strip().split(',')[0]\n break\n address = []\n while True:\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n address += [el.strip()]\n if re.search('Nevada \\\\d{5}', el) or re.search('NV \\\\d{5}', el):\n break\n el = iter_.__next__()\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n el = el.replace(u'\\xa0', ' ')\n matches1 = re.search(\n '(\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4}) FAX (\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4})', el\n )\n matches2 = re.search(\n '(\\\\(\\\\d{3}\\\\) \\\\d{3}-VOTE \\\\(\\\\d{4}\\\\)) FAX (\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4})'\n , el)\n if matches1:\n phone = matches1.group(1)\n fax = matches1.group(2)\n elif matches2:\n phone = matches2.group(1)\n fax = matches2.group(2)\n else:\n print(county_title)\n print(el)\n print(re.search('(\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4}) FAX', el))\n assert False\n emails, url = parse_emails_url(iter_)\n init = {'city': locale} if locale.endswith('City') else {'county': locale}\n return {**init, 'locale': locale, 'official': official, 'address': ', '\n .join(address), 'emails': list(set(emails)), 'phones': [phone],\n 'faxes': [fax], 'url': url}\n\n\ndef main():\n with open(dir_path(__file__) + '/cache/Nevada.htm') as fh:\n page = fh.read()\n soup = BeautifulSoup(page, 'lxml')\n ps = soup.select('div.content_area > p')\n iter_ = ElemIterator([x for p in ps for x in p.children])\n raw_counties = [parse_county(county) for county in parse_lines(iter_)]\n merge_counties = {}\n for county in raw_counties:\n locale = county['locale']\n if locale in merge_counties:\n merge_counties[locale]['emails'] += county['emails']\n else:\n merge_counties[locale] = county\n counties = list(merge_counties.values())\n assert len(counties) == len(raw_counties) - 1\n with open('public/nevada.json', 'w') as fh:\n json.dump(counties, fh)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef is_element(el, tag):\n return isinstance(el, Tag) and el.name == tag\n\n\nclass ElemIterator:\n\n def __init__(self, els):\n self.els = els\n self.i = 0\n\n def peek(self):\n try:\n return self.els[self.i]\n except IndexError:\n return None\n\n def __next__(self):\n self.i += 1\n return self.els[self.i - 1]\n\n def hasNext(self):\n return len(self.els) > self.i\n\n def peek_till(self, tag):\n while not is_element(self.peek(), tag):\n self.__next__()\n\n def next_till(self, tag):\n self.peek_till(tag)\n self.__next__()\n\n\ndef parse_lines(iter_):\n iter_.peek_till('strong')\n county = []\n while iter_.hasNext():\n county += [iter_.__next__()]\n if is_element(iter_.peek(), 'strong'):\n yield ElemIterator(county)\n county = []\n yield ElemIterator(county)\n county = []\n\n\ndef parse_emails_url(iter_):\n emails = []\n url = None\n try:\n while True:\n iter_.peek_till('a')\n email = iter_.__next__()\n href = email['href']\n if href.startswith('mailto:'):\n if href[7:]:\n emails += [href[7:]]\n else:\n emails += [email.text]\n else:\n url = href\n except IndexError:\n pass\n return emails, url\n\n\ndef parse_url(iter_):\n iter_.peek_till('a')\n link = iter_.__next__()\n href = link['href']\n assert not href.startswith('mailto:')\n return [href]\n\n\ndef parse_county(iter_):\n county_title = iter_.__next__().text.strip().title()\n locale = re.match('(.*) (City|County)', county_title).group(0)\n if county_title.startswith('Clark County Elections Mailing Address'):\n emails, url = parse_emails_url(iter_)\n return {'locale': locale, 'county': locale, 'emails': emails}\n while True:\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n if 'Clerk' in el or 'Registrar' in el:\n official = el.strip().split(',')[0]\n break\n address = []\n while True:\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n address += [el.strip()]\n if re.search('Nevada \\\\d{5}', el) or re.search('NV \\\\d{5}', el):\n break\n el = iter_.__next__()\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n el = el.replace(u'\\xa0', ' ')\n matches1 = re.search(\n '(\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4}) FAX (\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4})', el\n )\n matches2 = re.search(\n '(\\\\(\\\\d{3}\\\\) \\\\d{3}-VOTE \\\\(\\\\d{4}\\\\)) FAX (\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4})'\n , el)\n if matches1:\n phone = matches1.group(1)\n fax = matches1.group(2)\n elif matches2:\n phone = matches2.group(1)\n fax = matches2.group(2)\n else:\n print(county_title)\n print(el)\n print(re.search('(\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4}) FAX', el))\n assert False\n emails, url = parse_emails_url(iter_)\n init = {'city': locale} if locale.endswith('City') else {'county': locale}\n return {**init, 'locale': locale, 'official': official, 'address': ', '\n .join(address), 'emails': list(set(emails)), 'phones': [phone],\n 'faxes': [fax], 'url': url}\n\n\ndef main():\n with open(dir_path(__file__) + '/cache/Nevada.htm') as fh:\n page = fh.read()\n soup = BeautifulSoup(page, 'lxml')\n ps = soup.select('div.content_area > p')\n iter_ = ElemIterator([x for p in ps for x in p.children])\n raw_counties = [parse_county(county) for county in parse_lines(iter_)]\n merge_counties = {}\n for county in raw_counties:\n locale = county['locale']\n if locale in merge_counties:\n merge_counties[locale]['emails'] += county['emails']\n else:\n merge_counties[locale] = county\n counties = list(merge_counties.values())\n assert len(counties) == len(raw_counties) - 1\n with open('public/nevada.json', 'w') as 
fh:\n json.dump(counties, fh)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import json\nimport re\nfrom bs4 import BeautifulSoup\nfrom bs4.element import NavigableString, Tag\nfrom common import dir_path\n\n\ndef is_element(el, tag):\n return isinstance(el, Tag) and el.name == tag\n\n\nclass ElemIterator:\n\n def __init__(self, els):\n self.els = els\n self.i = 0\n\n def peek(self):\n try:\n return self.els[self.i]\n except IndexError:\n return None\n\n def __next__(self):\n self.i += 1\n return self.els[self.i - 1]\n\n def hasNext(self):\n return len(self.els) > self.i\n\n def peek_till(self, tag):\n while not is_element(self.peek(), tag):\n self.__next__()\n\n def next_till(self, tag):\n self.peek_till(tag)\n self.__next__()\n\n\ndef parse_lines(iter_):\n iter_.peek_till('strong')\n county = []\n while iter_.hasNext():\n county += [iter_.__next__()]\n if is_element(iter_.peek(), 'strong'):\n yield ElemIterator(county)\n county = []\n yield ElemIterator(county)\n county = []\n\n\ndef parse_emails_url(iter_):\n emails = []\n url = None\n try:\n while True:\n iter_.peek_till('a')\n email = iter_.__next__()\n href = email['href']\n if href.startswith('mailto:'):\n if href[7:]:\n emails += [href[7:]]\n else:\n emails += [email.text]\n else:\n url = href\n except IndexError:\n pass\n return emails, url\n\n\ndef parse_url(iter_):\n iter_.peek_till('a')\n link = iter_.__next__()\n href = link['href']\n assert not href.startswith('mailto:')\n return [href]\n\n\ndef parse_county(iter_):\n county_title = iter_.__next__().text.strip().title()\n locale = re.match('(.*) (City|County)', county_title).group(0)\n if county_title.startswith('Clark County Elections Mailing Address'):\n emails, url = parse_emails_url(iter_)\n return {'locale': locale, 'county': locale, 'emails': emails}\n while True:\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n if 'Clerk' in el or 'Registrar' in el:\n official = el.strip().split(',')[0]\n break\n address = []\n while True:\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n address += [el.strip()]\n if re.search('Nevada \\\\d{5}', el) or re.search('NV \\\\d{5}', el):\n break\n el = iter_.__next__()\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n el = el.replace(u'\\xa0', ' ')\n matches1 = re.search(\n '(\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4}) FAX (\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4})', el\n )\n matches2 = re.search(\n '(\\\\(\\\\d{3}\\\\) \\\\d{3}-VOTE \\\\(\\\\d{4}\\\\)) FAX (\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4})'\n , el)\n if matches1:\n phone = matches1.group(1)\n fax = matches1.group(2)\n elif matches2:\n phone = matches2.group(1)\n fax = matches2.group(2)\n else:\n print(county_title)\n print(el)\n print(re.search('(\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4}) FAX', el))\n assert False\n emails, url = parse_emails_url(iter_)\n init = {'city': locale} if locale.endswith('City') else {'county': locale}\n return {**init, 'locale': locale, 'official': official, 'address': ', '\n .join(address), 'emails': list(set(emails)), 'phones': [phone],\n 'faxes': [fax], 'url': url}\n\n\ndef main():\n with open(dir_path(__file__) + '/cache/Nevada.htm') as fh:\n page = fh.read()\n soup = BeautifulSoup(page, 'lxml')\n ps = soup.select('div.content_area > p')\n iter_ = ElemIterator([x for p in ps for x in p.children])\n raw_counties = [parse_county(county) for county in parse_lines(iter_)]\n merge_counties = {}\n for county in raw_counties:\n locale = county['locale']\n if locale in merge_counties:\n merge_counties[locale]['emails'] += county['emails']\n else:\n merge_counties[locale] = county\n counties = 
list(merge_counties.values())\n assert len(counties) == len(raw_counties) - 1\n with open('public/nevada.json', 'w') as fh:\n json.dump(counties, fh)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import json\nimport re\nfrom bs4 import BeautifulSoup\nfrom bs4.element import NavigableString, Tag\n\nfrom common import dir_path\n\n\ndef is_element(el, tag):\n return isinstance(el, Tag) and el.name == tag\n\n\nclass ElemIterator():\n def __init__(self, els):\n self.els = els\n self.i = 0\n\n def peek(self):\n try:\n return self.els[self.i]\n except IndexError:\n return None\n\n def __next__(self):\n self.i += 1\n return self.els[self.i - 1]\n\n def hasNext(self):\n return len(self.els) > (self.i)\n\n def peek_till(self, tag):\n while not is_element(self.peek(), tag):\n self.__next__()\n\n def next_till(self, tag):\n self.peek_till(tag)\n self.__next__()\n\n\ndef parse_lines(iter_):\n iter_.peek_till('strong')\n\n county = []\n while iter_.hasNext():\n county += [iter_.__next__()]\n\n if is_element(iter_.peek(), 'strong'):\n yield ElemIterator(county)\n county = []\n\n yield ElemIterator(county)\n county = []\n\n\ndef parse_emails_url(iter_):\n emails = []\n url = None\n\n try:\n while True:\n iter_.peek_till('a')\n email = iter_.__next__()\n href = email['href']\n if href.startswith('mailto:'):\n if href[7:]:\n emails += [href[7:]]\n else:\n emails += [email.text]\n else:\n url = href\n except IndexError:\n pass\n return emails, url\n\n\ndef parse_url(iter_):\n iter_.peek_till('a')\n link = iter_.__next__()\n href = link['href']\n assert not href.startswith('mailto:')\n return [href]\n\n\ndef parse_county(iter_):\n county_title = iter_.__next__().text.strip().title()\n locale = re.match('(.*) (City|County)', county_title).group(0)\n\n if county_title.startswith('Clark County Elections Mailing Address'):\n emails, url = parse_emails_url(iter_)\n return {\n 'locale': locale,\n 'county': locale,\n 'emails': emails,\n }\n\n while True:\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n if 'Clerk' in el or 'Registrar' in el:\n official = el.strip().split(',')[0]\n break\n\n address = []\n while True:\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n address += [el.strip()]\n if re.search(r'Nevada \\d{5}', el) or re.search(r'NV \\d{5}', el):\n break\n\n el = iter_.__next__()\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n el = el.replace(u'\\xa0', ' ') # replace non-breaking space\n matches1 = re.search(r'(\\(\\d{3}\\) \\d{3}-\\d{4}) FAX (\\(\\d{3}\\) \\d{3}-\\d{4})', el)\n matches2 = re.search(r'(\\(\\d{3}\\) \\d{3}-VOTE \\(\\d{4}\\)) FAX (\\(\\d{3}\\) \\d{3}-\\d{4})', el)\n if matches1:\n phone = matches1.group(1)\n fax = matches1.group(2)\n elif matches2:\n phone = matches2.group(1)\n fax = matches2.group(2)\n else:\n print(county_title)\n print(el)\n print(re.search(r'(\\(\\d{3}\\) \\d{3}-\\d{4}) FAX', el))\n assert False\n\n emails, url = parse_emails_url(iter_)\n\n init = {'city': locale} if locale.endswith('City') else {'county': locale}\n\n return {\n **init,\n 'locale': locale,\n 'official': official,\n 'address': ', '.join(address),\n 'emails': list(set(emails)),\n 'phones': [phone],\n 'faxes': [fax],\n 'url': url,\n }\n\n\ndef main():\n # Actually this file: https://www.nvsos.gov/sos/elections/voters/county-clerk-contact-information\n # But it's behind a javascript test\n with open(dir_path(__file__) + '/cache/Nevada.htm') as fh:\n page = fh.read()\n soup = BeautifulSoup(page, 'lxml')\n ps = soup.select('div.content_area > p')\n iter_ = ElemIterator([x for p in ps for x in p.children])\n raw_counties = [parse_county(county) for county in parse_lines(iter_)]\n\n merge_counties = {}\n for county in raw_counties:\n locale = 
county['locale']\n if locale in merge_counties:\n merge_counties[locale]['emails'] += county['emails']\n else:\n merge_counties[locale] = county\n\n counties = list(merge_counties.values())\n assert len(counties) == len(raw_counties) - 1\n\n with open('public/nevada.json', 'w') as fh:\n json.dump(counties, fh)\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
10,
12,
14,
15,
16
]
}
|
[
10,
12,
14,
15,
16
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
urlpatterns = [url('/home', views.home), url('/about', views.about)]
<|reserved_special_token_1|>
from django.conf.urls import url
from tree import views
urlpatterns = [url('/home', views.home), url('/about', views.about)]
<|reserved_special_token_1|>
from django.conf.urls import url
from tree import views
urlpatterns = [
url('/home', views.home),
url('/about', views.about),
]
|
flexible
|
{
"blob_id": "3313f01ed98433f4b150c4d8e877ac09eb8403b4",
"index": 5652,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [url('/home', views.home), url('/about', views.about)]\n",
"step-3": "from django.conf.urls import url\nfrom tree import views\nurlpatterns = [url('/home', views.home), url('/about', views.about)]\n",
"step-4": "\nfrom django.conf.urls import url\nfrom tree import views\n\nurlpatterns = [\n url('/home', views.home),\n url('/about', views.about),\n]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(12):
mp = monthlyPaymentRate * rb
rb = rb - mp
rb = rb + rb * monthlyir
print('remaining balance: ', round(rb, 2))
<|reserved_special_token_1|>
balance = 42
annualInterestRate = 0.2
monthlyPaymentRate = 0.04
monthlyir = annualInterestRate / 12
rb = balance
for i in range(12):
mp = monthlyPaymentRate * rb
rb = rb - mp
rb = rb + rb * monthlyir
print('remaining balance: ', round(rb, 2))
<|reserved_special_token_1|>
balance=42
annualInterestRate=0.20
monthlyPaymentRate=0.04
monthlyir = annualInterestRate/12
rb=balance
for i in range(12):
mp = monthlyPaymentRate * rb
rb=rb-mp
rb=rb+rb*monthlyir
print('remaining balance: ',round(rb,2))
|
flexible
|
{
"blob_id": "1429524b0ae3b679bc3d4386dd17ed50b0fff381",
"index": 146,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(12):\n mp = monthlyPaymentRate * rb\n rb = rb - mp\n rb = rb + rb * monthlyir\nprint('remaining balance: ', round(rb, 2))\n",
"step-3": "balance = 42\nannualInterestRate = 0.2\nmonthlyPaymentRate = 0.04\nmonthlyir = annualInterestRate / 12\nrb = balance\nfor i in range(12):\n mp = monthlyPaymentRate * rb\n rb = rb - mp\n rb = rb + rb * monthlyir\nprint('remaining balance: ', round(rb, 2))\n",
"step-4": "balance=42\n\nannualInterestRate=0.20\n\nmonthlyPaymentRate=0.04\n\n\nmonthlyir = annualInterestRate/12\n\nrb=balance\n\n\nfor i in range(12):\n mp = monthlyPaymentRate * rb\n rb=rb-mp\n rb=rb+rb*monthlyir\n\nprint('remaining balance: ',round(rb,2))\n \n \n\n\n\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
__all__ = ['language']
<|reserved_special_token_0|>
<|reserved_special_token_1|>
__all__ = ['language']
from StringTemplate import *
|
flexible
|
{
"blob_id": "e70c25ce1d61437aacfe7fad0a51e096e1ce4f5d",
"index": 5212,
"step-1": "<mask token>\n",
"step-2": "__all__ = ['language']\n<mask token>\n",
"step-3": "__all__ = ['language']\nfrom StringTemplate import *\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
def computeDice(im1, im2):
im1 = np.asarray(im1).astype(np.bool)
im2 = np.asarray(im2).astype(np.bool)
if im1.shape != im2.shape:
raise ValueError(
'Shape mismatch: im1 and im2 must have the same shape.')
intersection = np.logical_and(im1, im2)
dice = 2.0 * intersection.sum() / (im1.sum() + im2.sum())
if math.isnan(dice):
return 0
return dice
def main():
num_testing_patients = 4
n_labels = 1
normalize = True
modes = ['flair']
dataHandler = SegNetDataHandler('Data/BRATS_2018/HGG_Testing',
num_patients=num_testing_patients, modes=modes)
dataHandler.loadData()
dataHandler.preprocessForNetwork()
x_test = np.array(dataHandler.X)
x_seg_test = dataHandler.labels
dataHandler.clear()
segnet = load_model('Models/segnet_2018-10-28-14:37/model.h5',
custom_objects={'MaxPoolingWithArgmax2D': MaxPoolingWithArgmax2D,
'MaxUnpooling2D': MaxUnpooling2D, 'combinedDiceAndChamfer':
combinedDiceAndChamfer, 'combinedHausdorffAndDice':
combinedHausdorffAndDice, 'dice_coef': dice_coef, 'dice_coef_loss':
dice_coef_loss, 'dice_coef_multilabel': dice_coef_multilabel,
'dice_coef_multilabel_loss': dice_coef_multilabel_loss})
if normalize:
mu = np.mean(x_test)
sigma = np.std(x_test)
x_test -= mu
x_test /= sigma
decoded_imgs = segnet.predict(x_test)
if n_labels > 1:
decoded_imgs = [np.argmax(x, axis=1) for x in decoded_imgs]
else:
for x in x_seg_test:
x[x > 0.5] = 1
x[x < 0.5] = 0
for x in decoded_imgs:
x[x > 0.5] = 1
x[x < 0.5] = 0
decoded_imgs = [x.reshape(dataHandler.W, dataHandler.W) for x in
decoded_imgs]
N = len(decoded_imgs)
avg_dice = 0
for i in range(N):
foo = decoded_imgs[i].reshape(dataHandler.W, dataHandler.W)
dice = computeDice(x_seg_test[i], foo)
avg_dice = avg_dice + dice
print(str(avg_dice / N))
for i in range(N):
fig = plt.figure()
plt.gray()
fig.add_subplot(1, 3, 1)
plt.imshow(x_test[i, :, :, 0])
plt.axis('off')
plt.title('Original')
fig.add_subplot(1, 3, 2)
plt.imshow(x_seg_test[i])
plt.axis('off')
plt.title('GT Segment')
fig.add_subplot(1, 3, 3)
plt.imshow(decoded_imgs[i])
plt.axis('off')
plt.title('Predicted Segment')
plt.show()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
<|reserved_special_token_0|>
sys.path.append(DATA_DIR)
def computeDice(im1, im2):
im1 = np.asarray(im1).astype(np.bool)
im2 = np.asarray(im2).astype(np.bool)
if im1.shape != im2.shape:
raise ValueError(
'Shape mismatch: im1 and im2 must have the same shape.')
intersection = np.logical_and(im1, im2)
dice = 2.0 * intersection.sum() / (im1.sum() + im2.sum())
if math.isnan(dice):
return 0
return dice
def main():
num_testing_patients = 4
n_labels = 1
normalize = True
modes = ['flair']
dataHandler = SegNetDataHandler('Data/BRATS_2018/HGG_Testing',
num_patients=num_testing_patients, modes=modes)
dataHandler.loadData()
dataHandler.preprocessForNetwork()
x_test = np.array(dataHandler.X)
x_seg_test = dataHandler.labels
dataHandler.clear()
segnet = load_model('Models/segnet_2018-10-28-14:37/model.h5',
custom_objects={'MaxPoolingWithArgmax2D': MaxPoolingWithArgmax2D,
'MaxUnpooling2D': MaxUnpooling2D, 'combinedDiceAndChamfer':
combinedDiceAndChamfer, 'combinedHausdorffAndDice':
combinedHausdorffAndDice, 'dice_coef': dice_coef, 'dice_coef_loss':
dice_coef_loss, 'dice_coef_multilabel': dice_coef_multilabel,
'dice_coef_multilabel_loss': dice_coef_multilabel_loss})
if normalize:
mu = np.mean(x_test)
sigma = np.std(x_test)
x_test -= mu
x_test /= sigma
decoded_imgs = segnet.predict(x_test)
if n_labels > 1:
decoded_imgs = [np.argmax(x, axis=1) for x in decoded_imgs]
else:
for x in x_seg_test:
x[x > 0.5] = 1
x[x < 0.5] = 0
for x in decoded_imgs:
x[x > 0.5] = 1
x[x < 0.5] = 0
decoded_imgs = [x.reshape(dataHandler.W, dataHandler.W) for x in
decoded_imgs]
N = len(decoded_imgs)
avg_dice = 0
for i in range(N):
foo = decoded_imgs[i].reshape(dataHandler.W, dataHandler.W)
dice = computeDice(x_seg_test[i], foo)
avg_dice = avg_dice + dice
print(str(avg_dice / N))
for i in range(N):
fig = plt.figure()
plt.gray()
fig.add_subplot(1, 3, 1)
plt.imshow(x_test[i, :, :, 0])
plt.axis('off')
plt.title('Original')
fig.add_subplot(1, 3, 2)
plt.imshow(x_seg_test[i])
plt.axis('off')
plt.title('GT Segment')
fig.add_subplot(1, 3, 3)
plt.imshow(decoded_imgs[i])
plt.axis('off')
plt.title('Predicted Segment')
plt.show()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
<|reserved_special_token_0|>
DATA_DIR = os.path.abspath('../')
sys.path.append(DATA_DIR)
def computeDice(im1, im2):
im1 = np.asarray(im1).astype(np.bool)
im2 = np.asarray(im2).astype(np.bool)
if im1.shape != im2.shape:
raise ValueError(
'Shape mismatch: im1 and im2 must have the same shape.')
intersection = np.logical_and(im1, im2)
dice = 2.0 * intersection.sum() / (im1.sum() + im2.sum())
if math.isnan(dice):
return 0
return dice
def main():
num_testing_patients = 4
n_labels = 1
normalize = True
modes = ['flair']
dataHandler = SegNetDataHandler('Data/BRATS_2018/HGG_Testing',
num_patients=num_testing_patients, modes=modes)
dataHandler.loadData()
dataHandler.preprocessForNetwork()
x_test = np.array(dataHandler.X)
x_seg_test = dataHandler.labels
dataHandler.clear()
segnet = load_model('Models/segnet_2018-10-28-14:37/model.h5',
custom_objects={'MaxPoolingWithArgmax2D': MaxPoolingWithArgmax2D,
'MaxUnpooling2D': MaxUnpooling2D, 'combinedDiceAndChamfer':
combinedDiceAndChamfer, 'combinedHausdorffAndDice':
combinedHausdorffAndDice, 'dice_coef': dice_coef, 'dice_coef_loss':
dice_coef_loss, 'dice_coef_multilabel': dice_coef_multilabel,
'dice_coef_multilabel_loss': dice_coef_multilabel_loss})
if normalize:
mu = np.mean(x_test)
sigma = np.std(x_test)
x_test -= mu
x_test /= sigma
decoded_imgs = segnet.predict(x_test)
if n_labels > 1:
decoded_imgs = [np.argmax(x, axis=1) for x in decoded_imgs]
else:
for x in x_seg_test:
x[x > 0.5] = 1
x[x < 0.5] = 0
for x in decoded_imgs:
x[x > 0.5] = 1
x[x < 0.5] = 0
decoded_imgs = [x.reshape(dataHandler.W, dataHandler.W) for x in
decoded_imgs]
N = len(decoded_imgs)
avg_dice = 0
for i in range(N):
foo = decoded_imgs[i].reshape(dataHandler.W, dataHandler.W)
dice = computeDice(x_seg_test[i], foo)
avg_dice = avg_dice + dice
print(str(avg_dice / N))
for i in range(N):
fig = plt.figure()
plt.gray()
fig.add_subplot(1, 3, 1)
plt.imshow(x_test[i, :, :, 0])
plt.axis('off')
plt.title('Original')
fig.add_subplot(1, 3, 2)
plt.imshow(x_seg_test[i])
plt.axis('off')
plt.title('GT Segment')
fig.add_subplot(1, 3, 3)
plt.imshow(decoded_imgs[i])
plt.axis('off')
plt.title('Predicted Segment')
plt.show()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import sys
import os
from keras.utils import np_utils
from _codecs import decode
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from DataHandlers.SegNetDataHandler import SegNetDataHandler
import numpy as np
import matplotlib.pyplot as plt
from keras.models import load_model
from Mylayers import MaxPoolingWithArgmax2D, MaxUnpooling2D
import math
from CustomLosses import dice_coef, dice_coef_multilabel, dice_coef_loss, combinedDiceAndChamfer, dice_coef_multilabel_loss, combinedHausdorffAndDice
from dipy.segment.mask import clean_cc_mask
DATA_DIR = os.path.abspath('../')
sys.path.append(DATA_DIR)
def computeDice(im1, im2):
im1 = np.asarray(im1).astype(np.bool)
im2 = np.asarray(im2).astype(np.bool)
if im1.shape != im2.shape:
raise ValueError(
'Shape mismatch: im1 and im2 must have the same shape.')
intersection = np.logical_and(im1, im2)
dice = 2.0 * intersection.sum() / (im1.sum() + im2.sum())
if math.isnan(dice):
return 0
return dice
def main():
num_testing_patients = 4
n_labels = 1
normalize = True
modes = ['flair']
dataHandler = SegNetDataHandler('Data/BRATS_2018/HGG_Testing',
num_patients=num_testing_patients, modes=modes)
dataHandler.loadData()
dataHandler.preprocessForNetwork()
x_test = np.array(dataHandler.X)
x_seg_test = dataHandler.labels
dataHandler.clear()
segnet = load_model('Models/segnet_2018-10-28-14:37/model.h5',
custom_objects={'MaxPoolingWithArgmax2D': MaxPoolingWithArgmax2D,
'MaxUnpooling2D': MaxUnpooling2D, 'combinedDiceAndChamfer':
combinedDiceAndChamfer, 'combinedHausdorffAndDice':
combinedHausdorffAndDice, 'dice_coef': dice_coef, 'dice_coef_loss':
dice_coef_loss, 'dice_coef_multilabel': dice_coef_multilabel,
'dice_coef_multilabel_loss': dice_coef_multilabel_loss})
if normalize:
mu = np.mean(x_test)
sigma = np.std(x_test)
x_test -= mu
x_test /= sigma
decoded_imgs = segnet.predict(x_test)
if n_labels > 1:
decoded_imgs = [np.argmax(x, axis=1) for x in decoded_imgs]
else:
for x in x_seg_test:
x[x > 0.5] = 1
x[x < 0.5] = 0
for x in decoded_imgs:
x[x > 0.5] = 1
x[x < 0.5] = 0
decoded_imgs = [x.reshape(dataHandler.W, dataHandler.W) for x in
decoded_imgs]
N = len(decoded_imgs)
avg_dice = 0
for i in range(N):
foo = decoded_imgs[i].reshape(dataHandler.W, dataHandler.W)
dice = computeDice(x_seg_test[i], foo)
avg_dice = avg_dice + dice
print(str(avg_dice / N))
for i in range(N):
fig = plt.figure()
plt.gray()
fig.add_subplot(1, 3, 1)
plt.imshow(x_test[i, :, :, 0])
plt.axis('off')
plt.title('Original')
fig.add_subplot(1, 3, 2)
plt.imshow(x_seg_test[i])
plt.axis('off')
plt.title('GT Segment')
fig.add_subplot(1, 3, 3)
plt.imshow(decoded_imgs[i])
plt.axis('off')
plt.title('Predicted Segment')
plt.show()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
'''
Created on Jul 10, 2018
@author: daniel
'''
#from multiprocessing import Process, Manager
#from keras.utils import np_utils
import sys
import os
from keras.utils import np_utils
from _codecs import decode
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from DataHandlers.SegNetDataHandler import SegNetDataHandler
import numpy as np
import matplotlib.pyplot as plt
from keras.models import load_model
from Mylayers import MaxPoolingWithArgmax2D, MaxUnpooling2D
import math
from CustomLosses import dice_coef, dice_coef_multilabel, dice_coef_loss, combinedDiceAndChamfer, dice_coef_multilabel_loss, combinedHausdorffAndDice
from dipy.segment.mask import clean_cc_mask
DATA_DIR = os.path.abspath("../")
sys.path.append(DATA_DIR)
def computeDice(im1, im2):
im1 = np.asarray(im1).astype(np.bool)
im2 = np.asarray(im2).astype(np.bool)
if im1.shape != im2.shape:
raise ValueError("Shape mismatch: im1 and im2 must have the same shape.")
intersection = np.logical_and(im1, im2)
dice = 2. * intersection.sum() / (im1.sum() + im2.sum())
if math.isnan(dice):
return 0
return dice
def main():
num_testing_patients = 4
n_labels = 1
normalize = True
modes = ["flair"]
dataHandler = SegNetDataHandler("Data/BRATS_2018/HGG_Testing",
num_patients = num_testing_patients,
modes = modes)
dataHandler.loadData()
dataHandler.preprocessForNetwork()
x_test = np.array(dataHandler.X)
x_seg_test = dataHandler.labels
dataHandler.clear()
segnet = load_model("Models/segnet_2018-10-28-14:37/model.h5", custom_objects={'MaxPoolingWithArgmax2D': MaxPoolingWithArgmax2D,
'MaxUnpooling2D':MaxUnpooling2D,
'combinedDiceAndChamfer':combinedDiceAndChamfer,
'combinedHausdorffAndDice': combinedHausdorffAndDice,
'dice_coef':dice_coef,
'dice_coef_loss':dice_coef_loss,
'dice_coef_multilabel': dice_coef_multilabel,
'dice_coef_multilabel_loss' : dice_coef_multilabel_loss})
if normalize:
mu = np.mean(x_test)
sigma = np.std(x_test)
x_test -= mu
x_test /= sigma
decoded_imgs = segnet.predict(x_test)
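    # Multi-label predictions are collapsed with argmax; for a single label the
    # predictions and the ground truth are thresholded at 0.5 into binary masks.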
if n_labels > 1:
#x_seg_test = np_utils.to_categorical(x_seg_test)
#x_seg_test = np.argmax(x_seg_test, axis=3)
decoded_imgs = [np.argmax(x, axis = 1) for x in decoded_imgs]
else:
for x in x_seg_test:
x[x > 0.5] = 1
x[x < 0.5] = 0
for x in decoded_imgs:
x[x > 0.5] = 1
x[x < 0.5] = 0
decoded_imgs = [x.reshape(dataHandler.W, dataHandler.W) for x in decoded_imgs]
N = len(decoded_imgs)
avg_dice = 0
for i in range(N):
foo = decoded_imgs[i].reshape(dataHandler.W, dataHandler.W)
dice = computeDice(x_seg_test[i], foo)
avg_dice = avg_dice + dice
print(str(avg_dice/N))
for i in range(N):
fig = plt.figure()
        plt.gray()
fig.add_subplot(1,3,1)
plt.imshow(x_test[i,:,:,0])
plt.axis('off')
plt.title('Original')
fig.add_subplot(1,3,2)
plt.imshow(x_seg_test[i])
plt.axis('off')
plt.title('GT Segment')
fig.add_subplot(1,3,3)
plt.imshow(decoded_imgs[i])
plt.axis('off')
plt.title('Predicted Segment')
plt.show()
if __name__ == "__main__":
main()
|
flexible
|
{
"blob_id": "cb03fcf9c9cb61b3546865fe40cc411745e1fc94",
"index": 6872,
"step-1": "<mask token>\n\n\ndef computeDice(im1, im2):\n im1 = np.asarray(im1).astype(np.bool)\n im2 = np.asarray(im2).astype(np.bool)\n if im1.shape != im2.shape:\n raise ValueError(\n 'Shape mismatch: im1 and im2 must have the same shape.')\n intersection = np.logical_and(im1, im2)\n dice = 2.0 * intersection.sum() / (im1.sum() + im2.sum())\n if math.isnan(dice):\n return 0\n return dice\n\n\ndef main():\n num_testing_patients = 4\n n_labels = 1\n normalize = True\n modes = ['flair']\n dataHandler = SegNetDataHandler('Data/BRATS_2018/HGG_Testing',\n num_patients=num_testing_patients, modes=modes)\n dataHandler.loadData()\n dataHandler.preprocessForNetwork()\n x_test = np.array(dataHandler.X)\n x_seg_test = dataHandler.labels\n dataHandler.clear()\n segnet = load_model('Models/segnet_2018-10-28-14:37/model.h5',\n custom_objects={'MaxPoolingWithArgmax2D': MaxPoolingWithArgmax2D,\n 'MaxUnpooling2D': MaxUnpooling2D, 'combinedDiceAndChamfer':\n combinedDiceAndChamfer, 'combinedHausdorffAndDice':\n combinedHausdorffAndDice, 'dice_coef': dice_coef, 'dice_coef_loss':\n dice_coef_loss, 'dice_coef_multilabel': dice_coef_multilabel,\n 'dice_coef_multilabel_loss': dice_coef_multilabel_loss})\n if normalize:\n mu = np.mean(x_test)\n sigma = np.std(x_test)\n x_test -= mu\n x_test /= sigma\n decoded_imgs = segnet.predict(x_test)\n if n_labels > 1:\n decoded_imgs = [np.argmax(x, axis=1) for x in decoded_imgs]\n else:\n for x in x_seg_test:\n x[x > 0.5] = 1\n x[x < 0.5] = 0\n for x in decoded_imgs:\n x[x > 0.5] = 1\n x[x < 0.5] = 0\n decoded_imgs = [x.reshape(dataHandler.W, dataHandler.W) for x in\n decoded_imgs]\n N = len(decoded_imgs)\n avg_dice = 0\n for i in range(N):\n foo = decoded_imgs[i].reshape(dataHandler.W, dataHandler.W)\n dice = computeDice(x_seg_test[i], foo)\n avg_dice = avg_dice + dice\n print(str(avg_dice / N))\n for i in range(N):\n fig = plt.figure()\n plt.gray()\n fig.add_subplot(1, 3, 1)\n plt.imshow(x_test[i, :, :, 0])\n plt.axis('off')\n plt.title('Original')\n fig.add_subplot(1, 3, 2)\n plt.imshow(x_seg_test[i])\n plt.axis('off')\n plt.title('GT Segment')\n fig.add_subplot(1, 3, 3)\n plt.imshow(decoded_imgs[i])\n plt.axis('off')\n plt.title('Predicted Segment')\n plt.show()\n\n\n<mask token>\n",
"step-2": "<mask token>\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\n<mask token>\nsys.path.append(DATA_DIR)\n\n\ndef computeDice(im1, im2):\n im1 = np.asarray(im1).astype(np.bool)\n im2 = np.asarray(im2).astype(np.bool)\n if im1.shape != im2.shape:\n raise ValueError(\n 'Shape mismatch: im1 and im2 must have the same shape.')\n intersection = np.logical_and(im1, im2)\n dice = 2.0 * intersection.sum() / (im1.sum() + im2.sum())\n if math.isnan(dice):\n return 0\n return dice\n\n\ndef main():\n num_testing_patients = 4\n n_labels = 1\n normalize = True\n modes = ['flair']\n dataHandler = SegNetDataHandler('Data/BRATS_2018/HGG_Testing',\n num_patients=num_testing_patients, modes=modes)\n dataHandler.loadData()\n dataHandler.preprocessForNetwork()\n x_test = np.array(dataHandler.X)\n x_seg_test = dataHandler.labels\n dataHandler.clear()\n segnet = load_model('Models/segnet_2018-10-28-14:37/model.h5',\n custom_objects={'MaxPoolingWithArgmax2D': MaxPoolingWithArgmax2D,\n 'MaxUnpooling2D': MaxUnpooling2D, 'combinedDiceAndChamfer':\n combinedDiceAndChamfer, 'combinedHausdorffAndDice':\n combinedHausdorffAndDice, 'dice_coef': dice_coef, 'dice_coef_loss':\n dice_coef_loss, 'dice_coef_multilabel': dice_coef_multilabel,\n 'dice_coef_multilabel_loss': dice_coef_multilabel_loss})\n if normalize:\n mu = np.mean(x_test)\n sigma = np.std(x_test)\n x_test -= mu\n x_test /= sigma\n decoded_imgs = segnet.predict(x_test)\n if n_labels > 1:\n decoded_imgs = [np.argmax(x, axis=1) for x in decoded_imgs]\n else:\n for x in x_seg_test:\n x[x > 0.5] = 1\n x[x < 0.5] = 0\n for x in decoded_imgs:\n x[x > 0.5] = 1\n x[x < 0.5] = 0\n decoded_imgs = [x.reshape(dataHandler.W, dataHandler.W) for x in\n decoded_imgs]\n N = len(decoded_imgs)\n avg_dice = 0\n for i in range(N):\n foo = decoded_imgs[i].reshape(dataHandler.W, dataHandler.W)\n dice = computeDice(x_seg_test[i], foo)\n avg_dice = avg_dice + dice\n print(str(avg_dice / N))\n for i in range(N):\n fig = plt.figure()\n plt.gray()\n fig.add_subplot(1, 3, 1)\n plt.imshow(x_test[i, :, :, 0])\n plt.axis('off')\n plt.title('Original')\n fig.add_subplot(1, 3, 2)\n plt.imshow(x_seg_test[i])\n plt.axis('off')\n plt.title('GT Segment')\n fig.add_subplot(1, 3, 3)\n plt.imshow(decoded_imgs[i])\n plt.axis('off')\n plt.title('Predicted Segment')\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\n<mask token>\nDATA_DIR = os.path.abspath('../')\nsys.path.append(DATA_DIR)\n\n\ndef computeDice(im1, im2):\n im1 = np.asarray(im1).astype(np.bool)\n im2 = np.asarray(im2).astype(np.bool)\n if im1.shape != im2.shape:\n raise ValueError(\n 'Shape mismatch: im1 and im2 must have the same shape.')\n intersection = np.logical_and(im1, im2)\n dice = 2.0 * intersection.sum() / (im1.sum() + im2.sum())\n if math.isnan(dice):\n return 0\n return dice\n\n\ndef main():\n num_testing_patients = 4\n n_labels = 1\n normalize = True\n modes = ['flair']\n dataHandler = SegNetDataHandler('Data/BRATS_2018/HGG_Testing',\n num_patients=num_testing_patients, modes=modes)\n dataHandler.loadData()\n dataHandler.preprocessForNetwork()\n x_test = np.array(dataHandler.X)\n x_seg_test = dataHandler.labels\n dataHandler.clear()\n segnet = load_model('Models/segnet_2018-10-28-14:37/model.h5',\n custom_objects={'MaxPoolingWithArgmax2D': MaxPoolingWithArgmax2D,\n 'MaxUnpooling2D': MaxUnpooling2D, 'combinedDiceAndChamfer':\n combinedDiceAndChamfer, 'combinedHausdorffAndDice':\n combinedHausdorffAndDice, 'dice_coef': dice_coef, 'dice_coef_loss':\n dice_coef_loss, 'dice_coef_multilabel': dice_coef_multilabel,\n 'dice_coef_multilabel_loss': dice_coef_multilabel_loss})\n if normalize:\n mu = np.mean(x_test)\n sigma = np.std(x_test)\n x_test -= mu\n x_test /= sigma\n decoded_imgs = segnet.predict(x_test)\n if n_labels > 1:\n decoded_imgs = [np.argmax(x, axis=1) for x in decoded_imgs]\n else:\n for x in x_seg_test:\n x[x > 0.5] = 1\n x[x < 0.5] = 0\n for x in decoded_imgs:\n x[x > 0.5] = 1\n x[x < 0.5] = 0\n decoded_imgs = [x.reshape(dataHandler.W, dataHandler.W) for x in\n decoded_imgs]\n N = len(decoded_imgs)\n avg_dice = 0\n for i in range(N):\n foo = decoded_imgs[i].reshape(dataHandler.W, dataHandler.W)\n dice = computeDice(x_seg_test[i], foo)\n avg_dice = avg_dice + dice\n print(str(avg_dice / N))\n for i in range(N):\n fig = plt.figure()\n plt.gray()\n fig.add_subplot(1, 3, 1)\n plt.imshow(x_test[i, :, :, 0])\n plt.axis('off')\n plt.title('Original')\n fig.add_subplot(1, 3, 2)\n plt.imshow(x_seg_test[i])\n plt.axis('off')\n plt.title('GT Segment')\n fig.add_subplot(1, 3, 3)\n plt.imshow(decoded_imgs[i])\n plt.axis('off')\n plt.title('Predicted Segment')\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nimport sys\nimport os\nfrom keras.utils import np_utils\nfrom _codecs import decode\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\nfrom DataHandlers.SegNetDataHandler import SegNetDataHandler\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom keras.models import load_model\nfrom Mylayers import MaxPoolingWithArgmax2D, MaxUnpooling2D\nimport math\nfrom CustomLosses import dice_coef, dice_coef_multilabel, dice_coef_loss, combinedDiceAndChamfer, dice_coef_multilabel_loss, combinedHausdorffAndDice\nfrom dipy.segment.mask import clean_cc_mask\nDATA_DIR = os.path.abspath('../')\nsys.path.append(DATA_DIR)\n\n\ndef computeDice(im1, im2):\n im1 = np.asarray(im1).astype(np.bool)\n im2 = np.asarray(im2).astype(np.bool)\n if im1.shape != im2.shape:\n raise ValueError(\n 'Shape mismatch: im1 and im2 must have the same shape.')\n intersection = np.logical_and(im1, im2)\n dice = 2.0 * intersection.sum() / (im1.sum() + im2.sum())\n if math.isnan(dice):\n return 0\n return dice\n\n\ndef main():\n num_testing_patients = 4\n n_labels = 1\n normalize = True\n modes = ['flair']\n dataHandler = SegNetDataHandler('Data/BRATS_2018/HGG_Testing',\n num_patients=num_testing_patients, modes=modes)\n dataHandler.loadData()\n dataHandler.preprocessForNetwork()\n x_test = np.array(dataHandler.X)\n x_seg_test = dataHandler.labels\n dataHandler.clear()\n segnet = load_model('Models/segnet_2018-10-28-14:37/model.h5',\n custom_objects={'MaxPoolingWithArgmax2D': MaxPoolingWithArgmax2D,\n 'MaxUnpooling2D': MaxUnpooling2D, 'combinedDiceAndChamfer':\n combinedDiceAndChamfer, 'combinedHausdorffAndDice':\n combinedHausdorffAndDice, 'dice_coef': dice_coef, 'dice_coef_loss':\n dice_coef_loss, 'dice_coef_multilabel': dice_coef_multilabel,\n 'dice_coef_multilabel_loss': dice_coef_multilabel_loss})\n if normalize:\n mu = np.mean(x_test)\n sigma = np.std(x_test)\n x_test -= mu\n x_test /= sigma\n decoded_imgs = segnet.predict(x_test)\n if n_labels > 1:\n decoded_imgs = [np.argmax(x, axis=1) for x in decoded_imgs]\n else:\n for x in x_seg_test:\n x[x > 0.5] = 1\n x[x < 0.5] = 0\n for x in decoded_imgs:\n x[x > 0.5] = 1\n x[x < 0.5] = 0\n decoded_imgs = [x.reshape(dataHandler.W, dataHandler.W) for x in\n decoded_imgs]\n N = len(decoded_imgs)\n avg_dice = 0\n for i in range(N):\n foo = decoded_imgs[i].reshape(dataHandler.W, dataHandler.W)\n dice = computeDice(x_seg_test[i], foo)\n avg_dice = avg_dice + dice\n print(str(avg_dice / N))\n for i in range(N):\n fig = plt.figure()\n plt.gray()\n fig.add_subplot(1, 3, 1)\n plt.imshow(x_test[i, :, :, 0])\n plt.axis('off')\n plt.title('Original')\n fig.add_subplot(1, 3, 2)\n plt.imshow(x_seg_test[i])\n plt.axis('off')\n plt.title('GT Segment')\n fig.add_subplot(1, 3, 3)\n plt.imshow(decoded_imgs[i])\n plt.axis('off')\n plt.title('Predicted Segment')\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "'''\nCreated on Jul 10, 2018\n\n@author: daniel\n'''\n\n#from multiprocessing import Process, Manager\n#from keras.utils import np_utils\nimport sys\nimport os\nfrom keras.utils import np_utils\nfrom _codecs import decode\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\nfrom DataHandlers.SegNetDataHandler import SegNetDataHandler\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom keras.models import load_model\nfrom Mylayers import MaxPoolingWithArgmax2D, MaxUnpooling2D\nimport math\nfrom CustomLosses import dice_coef, dice_coef_multilabel, dice_coef_loss, combinedDiceAndChamfer, dice_coef_multilabel_loss, combinedHausdorffAndDice\nfrom dipy.segment.mask import clean_cc_mask\n\nDATA_DIR = os.path.abspath(\"../\")\nsys.path.append(DATA_DIR)\n\ndef computeDice(im1, im2):\n im1 = np.asarray(im1).astype(np.bool)\n im2 = np.asarray(im2).astype(np.bool)\n \n if im1.shape != im2.shape:\n raise ValueError(\"Shape mismatch: im1 and im2 must have the same shape.\")\n\n intersection = np.logical_and(im1, im2)\n dice = 2. * intersection.sum() / (im1.sum() + im2.sum())\n if math.isnan(dice):\n return 0\n return dice\ndef main():\n\n \n num_testing_patients = 4\n n_labels = 1\n normalize = True\n modes = [\"flair\"]\n dataHandler = SegNetDataHandler(\"Data/BRATS_2018/HGG_Testing\", \n num_patients = num_testing_patients, \n modes = modes)\n dataHandler.loadData()\n dataHandler.preprocessForNetwork()\n x_test = np.array(dataHandler.X)\n x_seg_test = dataHandler.labels\n dataHandler.clear()\n\n segnet = load_model(\"Models/segnet_2018-10-28-14:37/model.h5\", custom_objects={'MaxPoolingWithArgmax2D': MaxPoolingWithArgmax2D, \n 'MaxUnpooling2D':MaxUnpooling2D, \n 'combinedDiceAndChamfer':combinedDiceAndChamfer,\n 'combinedHausdorffAndDice': combinedHausdorffAndDice,\n 'dice_coef':dice_coef, \n 'dice_coef_loss':dice_coef_loss,\n 'dice_coef_multilabel': dice_coef_multilabel,\n 'dice_coef_multilabel_loss' : dice_coef_multilabel_loss})\n \n \n \n if normalize:\n mu = np.mean(x_test)\n sigma = np.std(x_test)\n x_test -= mu\n x_test /= sigma\n decoded_imgs = segnet.predict(x_test)\n\n if n_labels > 1:\n #x_seg_test = np_utils.to_categorical(x_seg_test)\n #x_seg_test = np.argmax(x_seg_test, axis=3)\n decoded_imgs = [np.argmax(x, axis = 1) for x in decoded_imgs]\n else:\n for x in x_seg_test:\n x[x > 0.5] = 1\n x[x < 0.5] = 0\n for x in decoded_imgs:\n x[x > 0.5] = 1\n x[x < 0.5] = 0\n \n\n decoded_imgs = [x.reshape(dataHandler.W, dataHandler.W) for x in decoded_imgs]\n\n\n N = len(decoded_imgs)\n\n \n \n avg_dice = 0\n \n for i in range(N):\n foo = decoded_imgs[i].reshape(dataHandler.W, dataHandler.W)\n dice = computeDice(x_seg_test[i], foo)\n avg_dice = avg_dice + dice\n print(str(avg_dice/N))\n \n \n for i in range(N):\n fig = plt.figure()\n plt.gray(); \n fig.add_subplot(1,3,1)\n plt.imshow(x_test[i,:,:,0])\n plt.axis('off')\n plt.title('Original')\n \n fig.add_subplot(1,3,2)\n plt.imshow(x_seg_test[i])\n plt.axis('off')\n plt.title('GT Segment')\n \n fig.add_subplot(1,3,3)\n\n plt.imshow(decoded_imgs[i])\n plt.axis('off')\n plt.title('Predicted Segment')\n\n plt.show()\n \n\n\nif __name__ == \"__main__\":\n main() \n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def test_mat():
model = models.load_metabolic_model('RECON2_mat')
assert isinstance(model, MetabolicModel)
assert len(model.reactions) == 7440
assert len(model.species) == 5063
def test_to_json():
model = models.load_metabolic_model('RECON2.2')
json = model.to_JSON()
assert isinstance(json, str)
model = models.load_metabolic_model('RECON1_xml')
json = model.to_JSON()
assert isinstance(json, str)
model = models.load_metabolic_model('RECON2_mat')
json = model.to_JSON()
assert isinstance(json, str)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_sbml_3():
model = models.load_metabolic_model('RECON1_xml')
assert isinstance(model, MetabolicModel)
assert len(model.reactions) == 3742
assert len(model.species) == 2766
<|reserved_special_token_0|>
def test_mat():
model = models.load_metabolic_model('RECON2_mat')
assert isinstance(model, MetabolicModel)
assert len(model.reactions) == 7440
assert len(model.species) == 5063
def test_to_json():
model = models.load_metabolic_model('RECON2.2')
json = model.to_JSON()
assert isinstance(json, str)
model = models.load_metabolic_model('RECON1_xml')
json = model.to_JSON()
assert isinstance(json, str)
model = models.load_metabolic_model('RECON2_mat')
json = model.to_JSON()
assert isinstance(json, str)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_sbml_3():
model = models.load_metabolic_model('RECON1_xml')
assert isinstance(model, MetabolicModel)
assert len(model.reactions) == 3742
assert len(model.species) == 2766
def test_sbml_2():
model = models.load_metabolic_model('RECON2.2')
assert isinstance(model, MetabolicModel)
assert len(model.reactions) == 7785
assert len(model.species) == 6047
def test_mat():
model = models.load_metabolic_model('RECON2_mat')
assert isinstance(model, MetabolicModel)
assert len(model.reactions) == 7440
assert len(model.species) == 5063
def test_to_json():
model = models.load_metabolic_model('RECON2.2')
json = model.to_JSON()
assert isinstance(json, str)
model = models.load_metabolic_model('RECON1_xml')
json = model.to_JSON()
assert isinstance(json, str)
model = models.load_metabolic_model('RECON2_mat')
json = model.to_JSON()
assert isinstance(json, str)
<|reserved_special_token_1|>
from compass import models
from compass.models.MetabolicModel import MetabolicModel
def test_sbml_3():
model = models.load_metabolic_model('RECON1_xml')
assert isinstance(model, MetabolicModel)
assert len(model.reactions) == 3742
assert len(model.species) == 2766
def test_sbml_2():
model = models.load_metabolic_model('RECON2.2')
assert isinstance(model, MetabolicModel)
assert len(model.reactions) == 7785
assert len(model.species) == 6047
def test_mat():
model = models.load_metabolic_model('RECON2_mat')
assert isinstance(model, MetabolicModel)
assert len(model.reactions) == 7440
assert len(model.species) == 5063
def test_to_json():
model = models.load_metabolic_model('RECON2.2')
json = model.to_JSON()
assert isinstance(json, str)
model = models.load_metabolic_model('RECON1_xml')
json = model.to_JSON()
assert isinstance(json, str)
model = models.load_metabolic_model('RECON2_mat')
json = model.to_JSON()
assert isinstance(json, str)
<|reserved_special_token_1|>
from compass import models
from compass.models.MetabolicModel import MetabolicModel
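# Smoke tests: each loader should return a MetabolicModel with the expected
# number of reactions and species, and every model should serialise to JSON.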
def test_sbml_3():
model = models.load_metabolic_model("RECON1_xml")
assert isinstance(model, MetabolicModel)
assert len(model.reactions) == 3742
assert len(model.species) == 2766
def test_sbml_2():
model = models.load_metabolic_model("RECON2.2")
assert isinstance(model, MetabolicModel)
assert len(model.reactions) == 7785
assert len(model.species) == 6047
def test_mat():
model = models.load_metabolic_model("RECON2_mat")
assert isinstance(model, MetabolicModel)
assert len(model.reactions) == 7440
assert len(model.species) == 5063
def test_to_json():
model = models.load_metabolic_model("RECON2.2")
json = model.to_JSON()
assert isinstance(json, str)
model = models.load_metabolic_model("RECON1_xml")
json = model.to_JSON()
assert isinstance(json, str)
model = models.load_metabolic_model("RECON2_mat")
json = model.to_JSON()
assert isinstance(json, str)
|
flexible
|
{
"blob_id": "863bae04a90143ed942a478c4b71a2269e123bb5",
"index": 2980,
"step-1": "<mask token>\n\n\ndef test_mat():\n model = models.load_metabolic_model('RECON2_mat')\n assert isinstance(model, MetabolicModel)\n assert len(model.reactions) == 7440\n assert len(model.species) == 5063\n\n\ndef test_to_json():\n model = models.load_metabolic_model('RECON2.2')\n json = model.to_JSON()\n assert isinstance(json, str)\n model = models.load_metabolic_model('RECON1_xml')\n json = model.to_JSON()\n assert isinstance(json, str)\n model = models.load_metabolic_model('RECON2_mat')\n json = model.to_JSON()\n assert isinstance(json, str)\n",
"step-2": "<mask token>\n\n\ndef test_sbml_3():\n model = models.load_metabolic_model('RECON1_xml')\n assert isinstance(model, MetabolicModel)\n assert len(model.reactions) == 3742\n assert len(model.species) == 2766\n\n\n<mask token>\n\n\ndef test_mat():\n model = models.load_metabolic_model('RECON2_mat')\n assert isinstance(model, MetabolicModel)\n assert len(model.reactions) == 7440\n assert len(model.species) == 5063\n\n\ndef test_to_json():\n model = models.load_metabolic_model('RECON2.2')\n json = model.to_JSON()\n assert isinstance(json, str)\n model = models.load_metabolic_model('RECON1_xml')\n json = model.to_JSON()\n assert isinstance(json, str)\n model = models.load_metabolic_model('RECON2_mat')\n json = model.to_JSON()\n assert isinstance(json, str)\n",
"step-3": "<mask token>\n\n\ndef test_sbml_3():\n model = models.load_metabolic_model('RECON1_xml')\n assert isinstance(model, MetabolicModel)\n assert len(model.reactions) == 3742\n assert len(model.species) == 2766\n\n\ndef test_sbml_2():\n model = models.load_metabolic_model('RECON2.2')\n assert isinstance(model, MetabolicModel)\n assert len(model.reactions) == 7785\n assert len(model.species) == 6047\n\n\ndef test_mat():\n model = models.load_metabolic_model('RECON2_mat')\n assert isinstance(model, MetabolicModel)\n assert len(model.reactions) == 7440\n assert len(model.species) == 5063\n\n\ndef test_to_json():\n model = models.load_metabolic_model('RECON2.2')\n json = model.to_JSON()\n assert isinstance(json, str)\n model = models.load_metabolic_model('RECON1_xml')\n json = model.to_JSON()\n assert isinstance(json, str)\n model = models.load_metabolic_model('RECON2_mat')\n json = model.to_JSON()\n assert isinstance(json, str)\n",
"step-4": "from compass import models\nfrom compass.models.MetabolicModel import MetabolicModel\n\n\ndef test_sbml_3():\n model = models.load_metabolic_model('RECON1_xml')\n assert isinstance(model, MetabolicModel)\n assert len(model.reactions) == 3742\n assert len(model.species) == 2766\n\n\ndef test_sbml_2():\n model = models.load_metabolic_model('RECON2.2')\n assert isinstance(model, MetabolicModel)\n assert len(model.reactions) == 7785\n assert len(model.species) == 6047\n\n\ndef test_mat():\n model = models.load_metabolic_model('RECON2_mat')\n assert isinstance(model, MetabolicModel)\n assert len(model.reactions) == 7440\n assert len(model.species) == 5063\n\n\ndef test_to_json():\n model = models.load_metabolic_model('RECON2.2')\n json = model.to_JSON()\n assert isinstance(json, str)\n model = models.load_metabolic_model('RECON1_xml')\n json = model.to_JSON()\n assert isinstance(json, str)\n model = models.load_metabolic_model('RECON2_mat')\n json = model.to_JSON()\n assert isinstance(json, str)\n",
"step-5": "from compass import models\nfrom compass.models.MetabolicModel import MetabolicModel\n\n\ndef test_sbml_3():\n model = models.load_metabolic_model(\"RECON1_xml\")\n assert isinstance(model, MetabolicModel)\n assert len(model.reactions) == 3742\n assert len(model.species) == 2766\n\n\ndef test_sbml_2():\n model = models.load_metabolic_model(\"RECON2.2\")\n assert isinstance(model, MetabolicModel)\n assert len(model.reactions) == 7785\n assert len(model.species) == 6047\n\n\ndef test_mat():\n model = models.load_metabolic_model(\"RECON2_mat\")\n assert isinstance(model, MetabolicModel)\n assert len(model.reactions) == 7440\n assert len(model.species) == 5063\n\n\ndef test_to_json():\n model = models.load_metabolic_model(\"RECON2.2\")\n json = model.to_JSON()\n assert isinstance(json, str)\n\n model = models.load_metabolic_model(\"RECON1_xml\")\n json = model.to_JSON()\n assert isinstance(json, str)\n\n model = models.load_metabolic_model(\"RECON2_mat\")\n json = model.to_JSON()\n assert isinstance(json, str)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from django.apps import AppConfig
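# Application configuration for the adminRequests Django app.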
class AdminrequestsConfig(AppConfig):
name = 'adminRequests'
|
normal
|
{
"blob_id": "e08b7a96c957895068e584a0564f02c52acd48ec",
"index": 3753,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass AdminrequestsConfig(AppConfig):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass AdminrequestsConfig(AppConfig):\n name = 'adminRequests'\n",
"step-4": "from django.apps import AppConfig\n\n\nclass AdminrequestsConfig(AppConfig):\n name = 'adminRequests'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import pandas as pd
import numpy as np
import sys
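# Computes column-wise means, standard deviations, the covariance matrix and
# the Pearson correlation matrix of a headerless CSV dataset passed on the
# command line; the correlation matrix is printed at the end.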
def avg (x):
return [sum(x[i])/row for i in range(col)]
def sd (x):
return [np.std(x[i]) for i in range(col)]
def cov (x, md_x):
cov_xy=[[0 for r in range(col)] for c in range(col)]
for i in range(col):
for j in range (col):
for k in range (row):
cov_xy[i][j]+=((data[i][k]-md_x[i])*(data[j][k]-md_x[j]))/(row)
return(cov_xy)
def cor (cov, sd_x):
cor_xy=[[0 for r in range(col)] for c in range(col)]
for i in range(col):
for j in range (col):
cor_xy[i][j] = cov[i][j]/(sd_x[i]*sd_x[j])
print("cov= ",cov[i][j],"sd i", sd_x[i], " sd k", sd_x[j],"cov/sd", cov[i][j]/(sd_x[i]*sd_x[j]))
return(cor_xy)
if __name__ == "__main__":
argv=sys.argv[:]
if len(argv)<2:
print("1 argument required. Provide data file name")
sys.exit(0)
data=pd.read_csv(argv[1],header= None)
row=data.shape[0]
col=data.shape[1]
print("** dataset dimensions **")
print(row)
print(col)
mean=avg(data)
stdev=sd(data)
print(stdev)
covar=cov(data, mean)
correl=cor(covar, stdev)
print("---------CORRELATION MATRIX---------")
print(correl)
|
normal
|
{
"blob_id": "ad3c5ed3d6a9aa83e69f53d3fec845e8e2b1c9c6",
"index": 883,
"step-1": "<mask token>\n\n\ndef avg(x):\n return [(sum(x[i]) / row) for i in range(col)]\n\n\n<mask token>\n\n\ndef cov(x, md_x):\n cov_xy = [[(0) for r in range(col)] for c in range(col)]\n for i in range(col):\n for j in range(col):\n for k in range(row):\n cov_xy[i][j] += (data[i][k] - md_x[i]) * (data[j][k] - md_x[j]\n ) / row\n return cov_xy\n\n\ndef cor(cov, sd_x):\n cor_xy = [[(0) for r in range(col)] for c in range(col)]\n for i in range(col):\n for j in range(col):\n cor_xy[i][j] = cov[i][j] / (sd_x[i] * sd_x[j])\n print('cov= ', cov[i][j], 'sd i', sd_x[i], ' sd k', sd_x[j],\n 'cov/sd', cov[i][j] / (sd_x[i] * sd_x[j]))\n return cor_xy\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef avg(x):\n return [(sum(x[i]) / row) for i in range(col)]\n\n\ndef sd(x):\n return [np.std(x[i]) for i in range(col)]\n\n\ndef cov(x, md_x):\n cov_xy = [[(0) for r in range(col)] for c in range(col)]\n for i in range(col):\n for j in range(col):\n for k in range(row):\n cov_xy[i][j] += (data[i][k] - md_x[i]) * (data[j][k] - md_x[j]\n ) / row\n return cov_xy\n\n\ndef cor(cov, sd_x):\n cor_xy = [[(0) for r in range(col)] for c in range(col)]\n for i in range(col):\n for j in range(col):\n cor_xy[i][j] = cov[i][j] / (sd_x[i] * sd_x[j])\n print('cov= ', cov[i][j], 'sd i', sd_x[i], ' sd k', sd_x[j],\n 'cov/sd', cov[i][j] / (sd_x[i] * sd_x[j]))\n return cor_xy\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef avg(x):\n return [(sum(x[i]) / row) for i in range(col)]\n\n\ndef sd(x):\n return [np.std(x[i]) for i in range(col)]\n\n\ndef cov(x, md_x):\n cov_xy = [[(0) for r in range(col)] for c in range(col)]\n for i in range(col):\n for j in range(col):\n for k in range(row):\n cov_xy[i][j] += (data[i][k] - md_x[i]) * (data[j][k] - md_x[j]\n ) / row\n return cov_xy\n\n\ndef cor(cov, sd_x):\n cor_xy = [[(0) for r in range(col)] for c in range(col)]\n for i in range(col):\n for j in range(col):\n cor_xy[i][j] = cov[i][j] / (sd_x[i] * sd_x[j])\n print('cov= ', cov[i][j], 'sd i', sd_x[i], ' sd k', sd_x[j],\n 'cov/sd', cov[i][j] / (sd_x[i] * sd_x[j]))\n return cor_xy\n\n\nif __name__ == '__main__':\n argv = sys.argv[:]\n if len(argv) < 2:\n print('1 argument required. Provide data file name')\n sys.exit(0)\n data = pd.read_csv(argv[1], header=None)\n row = data.shape[0]\n col = data.shape[1]\n print('** dataset dimensions **')\n print(row)\n print(col)\n mean = avg(data)\n stdev = sd(data)\n print(stdev)\n covar = cov(data, mean)\n correl = cor(covar, stdev)\n print('---------CORRELATION MATRIX---------')\n print(correl)\n",
"step-4": "import pandas as pd\nimport numpy as np\nimport sys\n\n\ndef avg(x):\n return [(sum(x[i]) / row) for i in range(col)]\n\n\ndef sd(x):\n return [np.std(x[i]) for i in range(col)]\n\n\ndef cov(x, md_x):\n cov_xy = [[(0) for r in range(col)] for c in range(col)]\n for i in range(col):\n for j in range(col):\n for k in range(row):\n cov_xy[i][j] += (data[i][k] - md_x[i]) * (data[j][k] - md_x[j]\n ) / row\n return cov_xy\n\n\ndef cor(cov, sd_x):\n cor_xy = [[(0) for r in range(col)] for c in range(col)]\n for i in range(col):\n for j in range(col):\n cor_xy[i][j] = cov[i][j] / (sd_x[i] * sd_x[j])\n print('cov= ', cov[i][j], 'sd i', sd_x[i], ' sd k', sd_x[j],\n 'cov/sd', cov[i][j] / (sd_x[i] * sd_x[j]))\n return cor_xy\n\n\nif __name__ == '__main__':\n argv = sys.argv[:]\n if len(argv) < 2:\n print('1 argument required. Provide data file name')\n sys.exit(0)\n data = pd.read_csv(argv[1], header=None)\n row = data.shape[0]\n col = data.shape[1]\n print('** dataset dimensions **')\n print(row)\n print(col)\n mean = avg(data)\n stdev = sd(data)\n print(stdev)\n covar = cov(data, mean)\n correl = cor(covar, stdev)\n print('---------CORRELATION MATRIX---------')\n print(correl)\n",
"step-5": "import pandas as pd\nimport numpy as np\nimport sys\n\ndef avg (x):\n return [sum(x[i])/row for i in range(col)]\n\ndef sd (x):\n return [np.std(x[i]) for i in range(col)]\n\ndef cov (x, md_x):\n cov_xy=[[0 for r in range(col)] for c in range(col)]\n for i in range(col):\n for j in range (col):\n for k in range (row):\n cov_xy[i][j]+=((data[i][k]-md_x[i])*(data[j][k]-md_x[j]))/(row)\n return(cov_xy)\n\ndef cor (cov, sd_x):\n cor_xy=[[0 for r in range(col)] for c in range(col)]\n for i in range(col):\n for j in range (col):\n cor_xy[i][j] = cov[i][j]/(sd_x[i]*sd_x[j])\n print(\"cov= \",cov[i][j],\"sd i\", sd_x[i], \" sd k\", sd_x[j],\"cov/sd\", cov[i][j]/(sd_x[i]*sd_x[j]))\n return(cor_xy)\n\n\nif __name__ == \"__main__\":\n \n argv=sys.argv[:]\n \n if len(argv)<2:\n print(\"1 argument required. Provide data file name\")\n sys.exit(0)\n \n data=pd.read_csv(argv[1],header= None)\n row=data.shape[0]\n col=data.shape[1]\n print(\"** dataset dimensions **\")\n print(row)\n print(col)\n mean=avg(data)\n stdev=sd(data)\n print(stdev)\n \n covar=cov(data, mean)\n correl=cor(covar, stdev)\n print(\"---------CORRELATION MATRIX---------\")\n print(correl)\n \n\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
############################## Import Modules ##################################
import pandas as pd
import numpy as np
import re
from scipy import stats
import matplotlib.pyplot as plt
############################## Define Functions ################################
# generate list containing data of standard curve
def process_std(standard_input_file):
try:
with open(standard_input_file, 'r') as in_handle:
lin_reg_lst = []
for line in in_handle:
line = line.strip('\n')
lin_reg_lst.append(line)
except IOError:
print("Could not open " + standard_input_file + " for reading.")
quit(1)
return lin_reg_lst
# generate info_dict containing information about the samples
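# expected line format: "<name> <well1,well2,...> <conc> <dilution>",
# e.g. "WT A1,A2,A3 2.5 10" (the example values are illustrative only)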
def process_info(info_file):
try:
info_dict = {}
with open(info_file, 'r') as in_handle:
for line in in_handle:
line = line.strip()
items = re.split(' ', line)
well_lst = re.split(',', items[1])
info_dict[items[0]] = {'wells': well_lst,
'conc': float(items[2]),
'dil': float(items[3])}
except IOError:
print("Could not open " + args.info + " for reading.")
quit(1)
return info_dict
# calculate substrate concentration from absorption values
def abs_to_subconc(meas_df, info_dict, m, c):
# find data series belonging to a sample
for sample in info_dict.keys():
for well in info_dict[sample]['wells']:
i = np.where(meas_df == well)
# convert absorption values to substrate concentration
for row in meas_df[i[0]]:
count = 1
for el in row:
if type(el) != str:
conc = (el - c)/m
meas_df[i[0], count] = conc
count += 1
return meas_df
# process blank to get slope
def process_blank(blank_file, std_m, std_c):
blank_df = pd.read_csv(blank_file)
blank_df = blank_df.to_numpy()
# define x values
i = np.where(blank_df == 'Time [s]')
# fall-back for case that time per well is measured
if len(i[0]) == 0:
b_arr = []
i = np.where(blank_df == 'Time [ms]')
# convert ms to s
for row in blank_df[i[0]]:
count = 1
arr = []
for el in row:
if type(el) != str:
sec = el*0.001
arr.append(sec)
count += 1
b_arr.append(arr)
blank_x = np.vstack(b_arr)
# make average for time
av_lst = []
for row in np.transpose(blank_x):
av = sum(row) / len(row)
av_lst.append(av)
blank_x = np.transpose(np.array(av_lst))
else:
blank_x = np.array(blank_df[i[0]][0, 1:])
# define y values
arr = []
for row in blank_df:
if re.search(r'^[A-Z]\d\d?$', row[0]):
arr.append(row[1:])
if len(arr) < 2:
blank_arr = np.array(arr)
else:
blank_arr = np.vstack(arr)
count_r = 0
for row in blank_arr:
count_c = 0
for el in row:
if type(el) != str:
conc = (el - std_c)/std_m
blank_arr[count_r, count_c] = conc
count_c += 1
count_r += 1
av_lst = []
for row in np.transpose(blank_arr):
av = sum(row) / len(row)
av_lst.append(av)
if len(av_lst) < 2:
blank_y = np.transpose(np.array(av_lst))
else:
blank_y = np.transpose(np.vstack(av_lst))
b_m, b_c, b_r, b_p, stderr = stats.linregress(blank_x.astype(float),
blank_y.astype(float))
return b_m
# calculate average activity and standard deviation of each sample
def act_calc(meas_df, info_dict, b_m, std_m, std_c):
act_dict = {}
# m_lin defines most linear part from first point
while True:
print("How many time intervals you want to take for the "
+ "analysis? (most linear part from first to x)")
m_lin = input()
if m_lin.isnumeric() == True and int(m_lin) > 1:
break
m_lin = int(m_lin)
# define volume per well
while True:
print("What is the volume per well? (in µL)")
well_v = input()
print("\n")
if well_v.isnumeric() == True:
break
# define x values
time = np.where(meas_df == 'Time [s]')
# fall-back for case that time per well is measured
if len(time[0]) == 0:
m_arr = []
time = np.where(meas_df == 'Time [ms]')
# convert ms to s
for row in meas_df[time[0]]:
arr = []
count = 1
for el in row:
if type(el) != str:
sec = el*0.001
arr.append(sec)
count += 1
m_arr.append(arr)
x = np.vstack(m_arr)
# make average for time values
av_lst = []
for row in np.transpose(x):
av = sum(row) / len(row)
av_lst.append(av)
x = np.transpose(np.array(av_lst[0:m_lin]))
else:
x = meas_df[time[0]]
x = np.array(x[0, 1:m_lin + 1])
# process sample data
for sample in info_dict.keys():
e_conc = info_dict[sample]['conc']
e_dil = info_dict[sample]['dil']
e_conc = float(e_conc)/ (float(e_dil)*1000)
for well in info_dict[sample]['wells']:
i = np.where(meas_df == well)
y = meas_df[i[0]]
y = np.array(y[0, 1:m_lin + 1])
m, c, r, p, stderr = stats.linregress(x.astype(float),
y.astype(float))
print(sample + ' >R²' + str(r))
# plot substrate decrease
plt.figure(1, figsize=[10,5], frameon=False)
plt.plot(x, y, 'x', markersize=2, label=sample)
plt.plot(x, m*x + c, 'r', linestyle='--', color='gray')
plt.savefig('activity_plot.png')
# calculate specific activity
m = abs(m - b_m)
sact = (m*60*int(well_v)) / (10*1000000*float(e_conc))
act_dict.setdefault(sample, [])
act_dict[sample].append(sact)
# calculate average specific activity per sample
summery_dict = {}
summery_dict['interval'] = m_lin
for sample in act_dict.keys():
av_sact = sum(act_dict[sample]) / len(act_dict[sample])
print("average specific activity of " + sample + " = "
+ str(av_sact) + " U/mg")
# calculate standard deviation per sample
std = np.std(act_dict[sample])
print("standard deviation for " + sample + ": +/-" + str(std))
# generate summery_dict for output file
summery_dict[sample] = {'av_sact': av_sact, 'std': std}
return summery_dict
# process summery_dict to generate output file
def gen_output(summery_dict, name):
try:
with open(name + '_activity.out', 'w') as out_handle:
out_handle.write('time interval from 1. to '
+ str(summery_dict['interval'])
+ '. was used for calculations.\n')
for sample in summery_dict.keys():
if sample == 'interval':
continue
else:
out_handle.write(str(sample) + ': s = '
+ str(summery_dict[sample]['av_sact'])
+ ' +/- '
+ str(summery_dict[sample]['std']) + '\n')
except IOError:
print("Could not open activity.out for writing.")
quit(1)
|
normal
|
{
"blob_id": "19949b07c866d66b3ef00b6a386bf89f03e06294",
"index": 7984,
"step-1": "<mask token>\n\n\ndef process_std(standard_input_file):\n try:\n with open(standard_input_file, 'r') as in_handle:\n lin_reg_lst = []\n for line in in_handle:\n line = line.strip('\\n')\n lin_reg_lst.append(line)\n except IOError:\n print('Could not open ' + standard_input_file + ' for reading.')\n quit(1)\n return lin_reg_lst\n\n\n<mask token>\n\n\ndef abs_to_subconc(meas_df, info_dict, m, c):\n for sample in info_dict.keys():\n for well in info_dict[sample]['wells']:\n i = np.where(meas_df == well)\n for row in meas_df[i[0]]:\n count = 1\n for el in row:\n if type(el) != str:\n conc = (el - c) / m\n meas_df[i[0], count] = conc\n count += 1\n return meas_df\n\n\n<mask token>\n\n\ndef act_calc(meas_df, info_dict, b_m, std_m, std_c):\n act_dict = {}\n while True:\n print('How many time intervals you want to take for the ' +\n 'analysis? (most linear part from first to x)')\n m_lin = input()\n if m_lin.isnumeric() == True and int(m_lin) > 1:\n break\n m_lin = int(m_lin)\n while True:\n print('What is the volume per well? (in µL)')\n well_v = input()\n print('\\n')\n if well_v.isnumeric() == True:\n break\n time = np.where(meas_df == 'Time [s]')\n if len(time[0]) == 0:\n m_arr = []\n time = np.where(meas_df == 'Time [ms]')\n for row in meas_df[time[0]]:\n arr = []\n count = 1\n for el in row:\n if type(el) != str:\n sec = el * 0.001\n arr.append(sec)\n count += 1\n m_arr.append(arr)\n x = np.vstack(m_arr)\n av_lst = []\n for row in np.transpose(x):\n av = sum(row) / len(row)\n av_lst.append(av)\n x = np.transpose(np.array(av_lst[0:m_lin]))\n else:\n x = meas_df[time[0]]\n x = np.array(x[0, 1:m_lin + 1])\n for sample in info_dict.keys():\n e_conc = info_dict[sample]['conc']\n e_dil = info_dict[sample]['dil']\n e_conc = float(e_conc) / (float(e_dil) * 1000)\n for well in info_dict[sample]['wells']:\n i = np.where(meas_df == well)\n y = meas_df[i[0]]\n y = np.array(y[0, 1:m_lin + 1])\n m, c, r, p, stderr = stats.linregress(x.astype(float), y.astype\n (float))\n print(sample + ' >R²' + str(r))\n plt.figure(1, figsize=[10, 5], frameon=False)\n plt.plot(x, y, 'x', markersize=2, label=sample)\n plt.plot(x, m * x + c, 'r', linestyle='--', color='gray')\n plt.savefig('activity_plot.png')\n m = abs(m - b_m)\n sact = m * 60 * int(well_v) / (10 * 1000000 * float(e_conc))\n act_dict.setdefault(sample, [])\n act_dict[sample].append(sact)\n summery_dict = {}\n summery_dict['interval'] = m_lin\n for sample in act_dict.keys():\n av_sact = sum(act_dict[sample]) / len(act_dict[sample])\n print('average specific activity of ' + sample + ' = ' + str(\n av_sact) + ' U/mg')\n std = np.std(act_dict[sample])\n print('standard deviation for ' + sample + ': +/-' + str(std))\n summery_dict[sample] = {'av_sact': av_sact, 'std': std}\n return summery_dict\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef process_std(standard_input_file):\n try:\n with open(standard_input_file, 'r') as in_handle:\n lin_reg_lst = []\n for line in in_handle:\n line = line.strip('\\n')\n lin_reg_lst.append(line)\n except IOError:\n print('Could not open ' + standard_input_file + ' for reading.')\n quit(1)\n return lin_reg_lst\n\n\ndef process_info(info_file):\n try:\n info_dict = {}\n with open(info_file, 'r') as in_handle:\n for line in in_handle:\n line = line.strip()\n items = re.split(' ', line)\n well_lst = re.split(',', items[1])\n info_dict[items[0]] = {'wells': well_lst, 'conc': float(\n items[2]), 'dil': float(items[3])}\n except IOError:\n print('Could not open ' + args.info + ' for reading.')\n quit(1)\n return info_dict\n\n\ndef abs_to_subconc(meas_df, info_dict, m, c):\n for sample in info_dict.keys():\n for well in info_dict[sample]['wells']:\n i = np.where(meas_df == well)\n for row in meas_df[i[0]]:\n count = 1\n for el in row:\n if type(el) != str:\n conc = (el - c) / m\n meas_df[i[0], count] = conc\n count += 1\n return meas_df\n\n\n<mask token>\n\n\ndef act_calc(meas_df, info_dict, b_m, std_m, std_c):\n act_dict = {}\n while True:\n print('How many time intervals you want to take for the ' +\n 'analysis? (most linear part from first to x)')\n m_lin = input()\n if m_lin.isnumeric() == True and int(m_lin) > 1:\n break\n m_lin = int(m_lin)\n while True:\n print('What is the volume per well? (in µL)')\n well_v = input()\n print('\\n')\n if well_v.isnumeric() == True:\n break\n time = np.where(meas_df == 'Time [s]')\n if len(time[0]) == 0:\n m_arr = []\n time = np.where(meas_df == 'Time [ms]')\n for row in meas_df[time[0]]:\n arr = []\n count = 1\n for el in row:\n if type(el) != str:\n sec = el * 0.001\n arr.append(sec)\n count += 1\n m_arr.append(arr)\n x = np.vstack(m_arr)\n av_lst = []\n for row in np.transpose(x):\n av = sum(row) / len(row)\n av_lst.append(av)\n x = np.transpose(np.array(av_lst[0:m_lin]))\n else:\n x = meas_df[time[0]]\n x = np.array(x[0, 1:m_lin + 1])\n for sample in info_dict.keys():\n e_conc = info_dict[sample]['conc']\n e_dil = info_dict[sample]['dil']\n e_conc = float(e_conc) / (float(e_dil) * 1000)\n for well in info_dict[sample]['wells']:\n i = np.where(meas_df == well)\n y = meas_df[i[0]]\n y = np.array(y[0, 1:m_lin + 1])\n m, c, r, p, stderr = stats.linregress(x.astype(float), y.astype\n (float))\n print(sample + ' >R²' + str(r))\n plt.figure(1, figsize=[10, 5], frameon=False)\n plt.plot(x, y, 'x', markersize=2, label=sample)\n plt.plot(x, m * x + c, 'r', linestyle='--', color='gray')\n plt.savefig('activity_plot.png')\n m = abs(m - b_m)\n sact = m * 60 * int(well_v) / (10 * 1000000 * float(e_conc))\n act_dict.setdefault(sample, [])\n act_dict[sample].append(sact)\n summery_dict = {}\n summery_dict['interval'] = m_lin\n for sample in act_dict.keys():\n av_sact = sum(act_dict[sample]) / len(act_dict[sample])\n print('average specific activity of ' + sample + ' = ' + str(\n av_sact) + ' U/mg')\n std = np.std(act_dict[sample])\n print('standard deviation for ' + sample + ': +/-' + str(std))\n summery_dict[sample] = {'av_sact': av_sact, 'std': std}\n return summery_dict\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef process_std(standard_input_file):\n try:\n with open(standard_input_file, 'r') as in_handle:\n lin_reg_lst = []\n for line in in_handle:\n line = line.strip('\\n')\n lin_reg_lst.append(line)\n except IOError:\n print('Could not open ' + standard_input_file + ' for reading.')\n quit(1)\n return lin_reg_lst\n\n\ndef process_info(info_file):\n try:\n info_dict = {}\n with open(info_file, 'r') as in_handle:\n for line in in_handle:\n line = line.strip()\n items = re.split(' ', line)\n well_lst = re.split(',', items[1])\n info_dict[items[0]] = {'wells': well_lst, 'conc': float(\n items[2]), 'dil': float(items[3])}\n except IOError:\n print('Could not open ' + args.info + ' for reading.')\n quit(1)\n return info_dict\n\n\ndef abs_to_subconc(meas_df, info_dict, m, c):\n for sample in info_dict.keys():\n for well in info_dict[sample]['wells']:\n i = np.where(meas_df == well)\n for row in meas_df[i[0]]:\n count = 1\n for el in row:\n if type(el) != str:\n conc = (el - c) / m\n meas_df[i[0], count] = conc\n count += 1\n return meas_df\n\n\ndef process_blank(blank_file, std_m, std_c):\n blank_df = pd.read_csv(blank_file)\n blank_df = blank_df.to_numpy()\n i = np.where(blank_df == 'Time [s]')\n if len(i[0]) == 0:\n b_arr = []\n i = np.where(blank_df == 'Time [ms]')\n for row in blank_df[i[0]]:\n count = 1\n arr = []\n for el in row:\n if type(el) != str:\n sec = el * 0.001\n arr.append(sec)\n count += 1\n b_arr.append(arr)\n blank_x = np.vstack(b_arr)\n av_lst = []\n for row in np.transpose(blank_x):\n av = sum(row) / len(row)\n av_lst.append(av)\n blank_x = np.transpose(np.array(av_lst))\n else:\n blank_x = np.array(blank_df[i[0]][0, 1:])\n arr = []\n for row in blank_df:\n if re.search('^[A-Z]\\\\d\\\\d?$', row[0]):\n arr.append(row[1:])\n if len(arr) < 2:\n blank_arr = np.array(arr)\n else:\n blank_arr = np.vstack(arr)\n count_r = 0\n for row in blank_arr:\n count_c = 0\n for el in row:\n if type(el) != str:\n conc = (el - std_c) / std_m\n blank_arr[count_r, count_c] = conc\n count_c += 1\n count_r += 1\n av_lst = []\n for row in np.transpose(blank_arr):\n av = sum(row) / len(row)\n av_lst.append(av)\n if len(av_lst) < 2:\n blank_y = np.transpose(np.array(av_lst))\n else:\n blank_y = np.transpose(np.vstack(av_lst))\n b_m, b_c, b_r, b_p, stderr = stats.linregress(blank_x.astype(float),\n blank_y.astype(float))\n return b_m\n\n\ndef act_calc(meas_df, info_dict, b_m, std_m, std_c):\n act_dict = {}\n while True:\n print('How many time intervals you want to take for the ' +\n 'analysis? (most linear part from first to x)')\n m_lin = input()\n if m_lin.isnumeric() == True and int(m_lin) > 1:\n break\n m_lin = int(m_lin)\n while True:\n print('What is the volume per well? 
(in µL)')\n well_v = input()\n print('\\n')\n if well_v.isnumeric() == True:\n break\n time = np.where(meas_df == 'Time [s]')\n if len(time[0]) == 0:\n m_arr = []\n time = np.where(meas_df == 'Time [ms]')\n for row in meas_df[time[0]]:\n arr = []\n count = 1\n for el in row:\n if type(el) != str:\n sec = el * 0.001\n arr.append(sec)\n count += 1\n m_arr.append(arr)\n x = np.vstack(m_arr)\n av_lst = []\n for row in np.transpose(x):\n av = sum(row) / len(row)\n av_lst.append(av)\n x = np.transpose(np.array(av_lst[0:m_lin]))\n else:\n x = meas_df[time[0]]\n x = np.array(x[0, 1:m_lin + 1])\n for sample in info_dict.keys():\n e_conc = info_dict[sample]['conc']\n e_dil = info_dict[sample]['dil']\n e_conc = float(e_conc) / (float(e_dil) * 1000)\n for well in info_dict[sample]['wells']:\n i = np.where(meas_df == well)\n y = meas_df[i[0]]\n y = np.array(y[0, 1:m_lin + 1])\n m, c, r, p, stderr = stats.linregress(x.astype(float), y.astype\n (float))\n print(sample + ' >R²' + str(r))\n plt.figure(1, figsize=[10, 5], frameon=False)\n plt.plot(x, y, 'x', markersize=2, label=sample)\n plt.plot(x, m * x + c, 'r', linestyle='--', color='gray')\n plt.savefig('activity_plot.png')\n m = abs(m - b_m)\n sact = m * 60 * int(well_v) / (10 * 1000000 * float(e_conc))\n act_dict.setdefault(sample, [])\n act_dict[sample].append(sact)\n summery_dict = {}\n summery_dict['interval'] = m_lin\n for sample in act_dict.keys():\n av_sact = sum(act_dict[sample]) / len(act_dict[sample])\n print('average specific activity of ' + sample + ' = ' + str(\n av_sact) + ' U/mg')\n std = np.std(act_dict[sample])\n print('standard deviation for ' + sample + ': +/-' + str(std))\n summery_dict[sample] = {'av_sact': av_sact, 'std': std}\n return summery_dict\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef process_std(standard_input_file):\n try:\n with open(standard_input_file, 'r') as in_handle:\n lin_reg_lst = []\n for line in in_handle:\n line = line.strip('\\n')\n lin_reg_lst.append(line)\n except IOError:\n print('Could not open ' + standard_input_file + ' for reading.')\n quit(1)\n return lin_reg_lst\n\n\ndef process_info(info_file):\n try:\n info_dict = {}\n with open(info_file, 'r') as in_handle:\n for line in in_handle:\n line = line.strip()\n items = re.split(' ', line)\n well_lst = re.split(',', items[1])\n info_dict[items[0]] = {'wells': well_lst, 'conc': float(\n items[2]), 'dil': float(items[3])}\n except IOError:\n print('Could not open ' + args.info + ' for reading.')\n quit(1)\n return info_dict\n\n\ndef abs_to_subconc(meas_df, info_dict, m, c):\n for sample in info_dict.keys():\n for well in info_dict[sample]['wells']:\n i = np.where(meas_df == well)\n for row in meas_df[i[0]]:\n count = 1\n for el in row:\n if type(el) != str:\n conc = (el - c) / m\n meas_df[i[0], count] = conc\n count += 1\n return meas_df\n\n\ndef process_blank(blank_file, std_m, std_c):\n blank_df = pd.read_csv(blank_file)\n blank_df = blank_df.to_numpy()\n i = np.where(blank_df == 'Time [s]')\n if len(i[0]) == 0:\n b_arr = []\n i = np.where(blank_df == 'Time [ms]')\n for row in blank_df[i[0]]:\n count = 1\n arr = []\n for el in row:\n if type(el) != str:\n sec = el * 0.001\n arr.append(sec)\n count += 1\n b_arr.append(arr)\n blank_x = np.vstack(b_arr)\n av_lst = []\n for row in np.transpose(blank_x):\n av = sum(row) / len(row)\n av_lst.append(av)\n blank_x = np.transpose(np.array(av_lst))\n else:\n blank_x = np.array(blank_df[i[0]][0, 1:])\n arr = []\n for row in blank_df:\n if re.search('^[A-Z]\\\\d\\\\d?$', row[0]):\n arr.append(row[1:])\n if len(arr) < 2:\n blank_arr = np.array(arr)\n else:\n blank_arr = np.vstack(arr)\n count_r = 0\n for row in blank_arr:\n count_c = 0\n for el in row:\n if type(el) != str:\n conc = (el - std_c) / std_m\n blank_arr[count_r, count_c] = conc\n count_c += 1\n count_r += 1\n av_lst = []\n for row in np.transpose(blank_arr):\n av = sum(row) / len(row)\n av_lst.append(av)\n if len(av_lst) < 2:\n blank_y = np.transpose(np.array(av_lst))\n else:\n blank_y = np.transpose(np.vstack(av_lst))\n b_m, b_c, b_r, b_p, stderr = stats.linregress(blank_x.astype(float),\n blank_y.astype(float))\n return b_m\n\n\ndef act_calc(meas_df, info_dict, b_m, std_m, std_c):\n act_dict = {}\n while True:\n print('How many time intervals you want to take for the ' +\n 'analysis? (most linear part from first to x)')\n m_lin = input()\n if m_lin.isnumeric() == True and int(m_lin) > 1:\n break\n m_lin = int(m_lin)\n while True:\n print('What is the volume per well? 
(in µL)')\n well_v = input()\n print('\\n')\n if well_v.isnumeric() == True:\n break\n time = np.where(meas_df == 'Time [s]')\n if len(time[0]) == 0:\n m_arr = []\n time = np.where(meas_df == 'Time [ms]')\n for row in meas_df[time[0]]:\n arr = []\n count = 1\n for el in row:\n if type(el) != str:\n sec = el * 0.001\n arr.append(sec)\n count += 1\n m_arr.append(arr)\n x = np.vstack(m_arr)\n av_lst = []\n for row in np.transpose(x):\n av = sum(row) / len(row)\n av_lst.append(av)\n x = np.transpose(np.array(av_lst[0:m_lin]))\n else:\n x = meas_df[time[0]]\n x = np.array(x[0, 1:m_lin + 1])\n for sample in info_dict.keys():\n e_conc = info_dict[sample]['conc']\n e_dil = info_dict[sample]['dil']\n e_conc = float(e_conc) / (float(e_dil) * 1000)\n for well in info_dict[sample]['wells']:\n i = np.where(meas_df == well)\n y = meas_df[i[0]]\n y = np.array(y[0, 1:m_lin + 1])\n m, c, r, p, stderr = stats.linregress(x.astype(float), y.astype\n (float))\n print(sample + ' >R²' + str(r))\n plt.figure(1, figsize=[10, 5], frameon=False)\n plt.plot(x, y, 'x', markersize=2, label=sample)\n plt.plot(x, m * x + c, 'r', linestyle='--', color='gray')\n plt.savefig('activity_plot.png')\n m = abs(m - b_m)\n sact = m * 60 * int(well_v) / (10 * 1000000 * float(e_conc))\n act_dict.setdefault(sample, [])\n act_dict[sample].append(sact)\n summery_dict = {}\n summery_dict['interval'] = m_lin\n for sample in act_dict.keys():\n av_sact = sum(act_dict[sample]) / len(act_dict[sample])\n print('average specific activity of ' + sample + ' = ' + str(\n av_sact) + ' U/mg')\n std = np.std(act_dict[sample])\n print('standard deviation for ' + sample + ': +/-' + str(std))\n summery_dict[sample] = {'av_sact': av_sact, 'std': std}\n return summery_dict\n\n\ndef gen_output(summery_dict, name):\n try:\n with open(name + '_activity.out', 'w') as out_handle:\n out_handle.write('time interval from 1. to ' + str(summery_dict\n ['interval']) + \"\"\". was used for calculations.\n\"\"\")\n for sample in summery_dict.keys():\n if sample == 'interval':\n continue\n else:\n out_handle.write(str(sample) + ': s = ' + str(\n summery_dict[sample]['av_sact']) + ' +/- ' + str(\n summery_dict[sample]['std']) + '\\n')\n except IOError:\n print('Could not open activity.out for writing.')\n quit(1)\n",
"step-5": "############################## Import Modules ##################################\nimport pandas as pd\nimport numpy as np\nimport re\nfrom scipy import stats\nimport matplotlib.pyplot as plt\n\n############################## Define Functions ################################\n# generate list containing data of standard curve\ndef process_std(standard_input_file):\n try:\n with open(standard_input_file, 'r') as in_handle:\n lin_reg_lst = []\n for line in in_handle:\n line = line.strip('\\n')\n lin_reg_lst.append(line)\n except IOError:\n print(\"Could not open \" + standard_input_file + \" for reading.\")\n quit(1)\n return lin_reg_lst\n\n# generate info_dict containing information about the samples\ndef process_info(info_file):\n try:\n info_dict = {}\n with open(info_file, 'r') as in_handle:\n for line in in_handle:\n line = line.strip()\n items = re.split(' ', line)\n well_lst = re.split(',', items[1])\n info_dict[items[0]] = {'wells': well_lst,\n 'conc': float(items[2]),\n 'dil': float(items[3])}\n except IOError:\n print(\"Could not open \" + args.info + \" for reading.\")\n quit(1)\n return info_dict\n\n# calculate substrate concentration from absorption values\ndef abs_to_subconc(meas_df, info_dict, m, c):\n # find data series belonging to a sample\n for sample in info_dict.keys():\n for well in info_dict[sample]['wells']:\n i = np.where(meas_df == well)\n # convert absorption values to substrate concentration\n for row in meas_df[i[0]]:\n count = 1\n for el in row:\n if type(el) != str:\n conc = (el - c)/m\n meas_df[i[0], count] = conc\n count += 1\n return meas_df\n\n# process blank to get slope\ndef process_blank(blank_file, std_m, std_c):\n blank_df = pd.read_csv(blank_file)\n blank_df = blank_df.to_numpy()\n # define x values\n i = np.where(blank_df == 'Time [s]')\n # fall-back for case that time per well is measured \n if len(i[0]) == 0:\n b_arr = []\n i = np.where(blank_df == 'Time [ms]')\n # convert ms to s\n for row in blank_df[i[0]]:\n count = 1\n arr = []\n for el in row:\n if type(el) != str:\n sec = el*0.001\n arr.append(sec)\n count += 1\n b_arr.append(arr)\n blank_x = np.vstack(b_arr)\n # make average for time\n av_lst = []\n for row in np.transpose(blank_x):\n av = sum(row) / len(row)\n av_lst.append(av)\n blank_x = np.transpose(np.array(av_lst))\n else:\n blank_x = np.array(blank_df[i[0]][0, 1:])\n # define y values\n arr = []\n for row in blank_df:\n if re.search(r'^[A-Z]\\d\\d?$', row[0]):\n arr.append(row[1:])\n if len(arr) < 2:\n blank_arr = np.array(arr)\n else:\n blank_arr = np.vstack(arr)\n count_r = 0\n for row in blank_arr:\n count_c = 0\n for el in row:\n if type(el) != str:\n conc = (el - std_c)/std_m\n blank_arr[count_r, count_c] = conc\n count_c += 1\n count_r += 1\n av_lst = []\n for row in np.transpose(blank_arr):\n av = sum(row) / len(row)\n av_lst.append(av)\n if len(av_lst) < 2:\n blank_y = np.transpose(np.array(av_lst))\n else:\n blank_y = np.transpose(np.vstack(av_lst))\n b_m, b_c, b_r, b_p, stderr = stats.linregress(blank_x.astype(float),\n blank_y.astype(float))\n return b_m\n\n# calculate average activity and standard deviation of each sample\ndef act_calc(meas_df, info_dict, b_m, std_m, std_c):\n act_dict = {}\n # m_lin defines most linear part from first point\n while True:\n print(\"How many time intervals you want to take for the \"\n + \"analysis? 
(most linear part from first to x)\")\n m_lin = input()\n if m_lin.isnumeric() == True and int(m_lin) > 1:\n break\n m_lin = int(m_lin)\n # define volume per well\n while True:\n print(\"What is the volume per well? (in µL)\")\n well_v = input()\n print(\"\\n\")\n if well_v.isnumeric() == True:\n break\n # define x values\n time = np.where(meas_df == 'Time [s]')\n # fall-back for case that time per well is measured \n if len(time[0]) == 0:\n m_arr = []\n time = np.where(meas_df == 'Time [ms]')\n # convert ms to s\n for row in meas_df[time[0]]:\n arr = []\n count = 1\n for el in row:\n if type(el) != str:\n sec = el*0.001\n arr.append(sec)\n count += 1\n m_arr.append(arr)\n x = np.vstack(m_arr)\n # make average for time values\n av_lst = []\n for row in np.transpose(x):\n av = sum(row) / len(row)\n av_lst.append(av)\n x = np.transpose(np.array(av_lst[0:m_lin]))\n else:\n x = meas_df[time[0]]\n x = np.array(x[0, 1:m_lin + 1])\n # process sample data\n for sample in info_dict.keys():\n e_conc = info_dict[sample]['conc']\n e_dil = info_dict[sample]['dil']\n e_conc = float(e_conc)/ (float(e_dil)*1000)\n for well in info_dict[sample]['wells']:\n i = np.where(meas_df == well)\n y = meas_df[i[0]]\n y = np.array(y[0, 1:m_lin + 1])\n m, c, r, p, stderr = stats.linregress(x.astype(float),\n y.astype(float))\n print(sample + ' >R²' + str(r))\n # plot substrate decrease\n plt.figure(1, figsize=[10,5], frameon=False)\n plt.plot(x, y, 'x', markersize=2, label=sample)\n plt.plot(x, m*x + c, 'r', linestyle='--', color='gray')\n plt.savefig('activity_plot.png')\n # calculate specific activity\n m = abs(m - b_m)\n sact = (m*60*int(well_v)) / (10*1000000*float(e_conc))\n act_dict.setdefault(sample, [])\n act_dict[sample].append(sact)\n # calculate average specific activity per sample\n summery_dict = {}\n summery_dict['interval'] = m_lin\n for sample in act_dict.keys():\n av_sact = sum(act_dict[sample]) / len(act_dict[sample])\n print(\"average specific activity of \" + sample + \" = \"\n + str(av_sact) + \" U/mg\")\n # calculate standard deviation per sample\n std = np.std(act_dict[sample])\n print(\"standard deviation for \" + sample + \": +/-\" + str(std))\n # generate summery_dict for output file\n summery_dict[sample] = {'av_sact': av_sact, 'std': std}\n return summery_dict\n\n# process summery_dict to generate output file\ndef gen_output(summery_dict, name):\n try:\n with open(name + '_activity.out', 'w') as out_handle:\n out_handle.write('time interval from 1. to '\n + str(summery_dict['interval'])\n + '. was used for calculations.\\n')\n for sample in summery_dict.keys():\n if sample == 'interval':\n continue\n else:\n out_handle.write(str(sample) + ': s = '\n + str(summery_dict[sample]['av_sact'])\n + ' +/- '\n + str(summery_dict[sample]['std']) + '\\n')\n except IOError:\n print(\"Could not open activity.out for writing.\")\n quit(1)\n",
"step-ids": [
3,
4,
5,
6,
8
]
}
|
[
3,
4,
5,
6,
8
] |
from arnold import config
class TestMicrophone:
def setup_method(self, method):
self.config = config.SENSOR['microphone']
def test_config(self):
required_config = [
'card_number', 'device_index', 'sample_rate', 'phrase_time_limit',
'energy_threshold'
]
for config_key in required_config:
assert config_key in self.config
def test_listen(self):
# TODO: Figure out how to mock this
pass
def test_recognise_command(self):
# TODO: Figure out how to mock this
pass
|
normal
|
{
"blob_id": "164167590051fac3f3fd80c5ed82621ba55c4cc4",
"index": 9597,
"step-1": "<mask token>\n\n\nclass TestMicrophone:\n <mask token>\n\n def test_config(self):\n required_config = ['card_number', 'device_index', 'sample_rate',\n 'phrase_time_limit', 'energy_threshold']\n for config_key in required_config:\n assert config_key in self.config\n\n def test_listen(self):\n pass\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestMicrophone:\n <mask token>\n\n def test_config(self):\n required_config = ['card_number', 'device_index', 'sample_rate',\n 'phrase_time_limit', 'energy_threshold']\n for config_key in required_config:\n assert config_key in self.config\n\n def test_listen(self):\n pass\n\n def test_recognise_command(self):\n pass\n",
"step-3": "<mask token>\n\n\nclass TestMicrophone:\n\n def setup_method(self, method):\n self.config = config.SENSOR['microphone']\n\n def test_config(self):\n required_config = ['card_number', 'device_index', 'sample_rate',\n 'phrase_time_limit', 'energy_threshold']\n for config_key in required_config:\n assert config_key in self.config\n\n def test_listen(self):\n pass\n\n def test_recognise_command(self):\n pass\n",
"step-4": "from arnold import config\n\n\nclass TestMicrophone:\n\n def setup_method(self, method):\n self.config = config.SENSOR['microphone']\n\n def test_config(self):\n required_config = ['card_number', 'device_index', 'sample_rate',\n 'phrase_time_limit', 'energy_threshold']\n for config_key in required_config:\n assert config_key in self.config\n\n def test_listen(self):\n pass\n\n def test_recognise_command(self):\n pass\n",
"step-5": "from arnold import config\n\n\nclass TestMicrophone:\n\n def setup_method(self, method):\n self.config = config.SENSOR['microphone']\n\n def test_config(self):\n required_config = [\n 'card_number', 'device_index', 'sample_rate', 'phrase_time_limit',\n 'energy_threshold'\n ]\n for config_key in required_config:\n assert config_key in self.config\n\n def test_listen(self):\n # TODO: Figure out how to mock this\n pass\n\n def test_recognise_command(self):\n # TODO: Figure out how to mock this\n pass\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
def findOrder(numCourses, prerequisites):
    # map each course to the prerequisites it still depends on
    d = {i: [] for i in range(numCourses)}
    for course, prereq in prerequisites:
        d[course].append(prereq)
    res = []
    while d:
        # courses with no remaining prerequisites can be taken now
        ready = [i for i in d if d[i] == []]
        if not ready:
            # a dependency cycle means no valid ordering exists
            res = []
            break
        for i in ready:
            res.append(i)
            del d[i]
        for j in d:
            for i in ready:
                if i in d[j]:
                    d[j].remove(i)
    print(res)
p = [[1,0],[2,0],[3,1],[3,2]]
n = 4
findOrder(n, p)
|
normal
|
{
"blob_id": "75b13f4985fcf26fb9f7fb040554b52b13c1806d",
"index": 4848,
"step-1": "def findOrder(numCourses,prerequisites):\n\td={}\n\tfor i in prerequisites:\n\t\tif i[0] not in d:\n\t\t\td[i[0]]=[i[1]]\n\t\t\tif i[1] not in d:\n\t\t\t\td[i[1]]=[]\n\t\telse:\n\t\t\td[i[0]].append(i[1])\n\tres=[]\n\twhile d:\n\t\tfor i in range(numCourses):\n\t\t\tif d[i] == []:\n\t\t\t\tres.append(d[i])\n\t\t\t\ttmp=d[i]\n\t\t\t\tdel d[i]\n\t\t\t\tfor j in d:\n\t\t\t\t\tif tmp in d[j]:\n\t\t\t\t\t\tdel d[j][tmp]\n\tprint res\n\np = [[1,0],[2,0],[3,1],[3,2]]\nn = 4\nfindOrder(n, p)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from colander_validators import (
email,
url)
def test_url():
assert url("ixmat.us") == True
assert url("http://bleh.net") == True
assert type(url("://ixmat.us")) == str
assert type(url("ixmat")) == str
def test_email():
assert email("[email protected]") == True
assert email("[email protected]") == True
assert type(email("barney")) == str
assert type(email("barney@dino")) == str
|
normal
|
{
"blob_id": "40637c7a5e45d0fe4184478a1be2e08e5040c93b",
"index": 8931,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_email():\n assert email('[email protected]') == True\n assert email('[email protected]') == True\n assert type(email('barney')) == str\n assert type(email('barney@dino')) == str\n",
"step-3": "<mask token>\n\n\ndef test_url():\n assert url('ixmat.us') == True\n assert url('http://bleh.net') == True\n assert type(url('://ixmat.us')) == str\n assert type(url('ixmat')) == str\n\n\ndef test_email():\n assert email('[email protected]') == True\n assert email('[email protected]') == True\n assert type(email('barney')) == str\n assert type(email('barney@dino')) == str\n",
"step-4": "from colander_validators import email, url\n\n\ndef test_url():\n assert url('ixmat.us') == True\n assert url('http://bleh.net') == True\n assert type(url('://ixmat.us')) == str\n assert type(url('ixmat')) == str\n\n\ndef test_email():\n assert email('[email protected]') == True\n assert email('[email protected]') == True\n assert type(email('barney')) == str\n assert type(email('barney@dino')) == str\n",
"step-5": "from colander_validators import (\n email,\n url)\n\n\ndef test_url():\n\n assert url(\"ixmat.us\") == True\n assert url(\"http://bleh.net\") == True\n assert type(url(\"://ixmat.us\")) == str\n assert type(url(\"ixmat\")) == str\n\n\ndef test_email():\n\n assert email(\"[email protected]\") == True\n assert email(\"[email protected]\") == True\n assert type(email(\"barney\")) == str\n assert type(email(\"barney@dino\")) == str\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python3
from pexpect import pxssh
import time
s = pxssh.pxssh()
ip = ""  # replace ip address
username = ""  # replace username
password = ""  # replace password
s.login(ip, username, password)
print("SSH session login successful")
s.sendline('application stop')
s.prompt()  # match the prompt
print("Stopping the app")
print("\nStarting the app")
s.sendline('application start')
s.prompt()
print("\nLogout")
s.logout()
|
normal
|
{
"blob_id": "dd9574ea08beb9bc5f1413afd63c751fd42cba67",
"index": 6406,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ns.login(ip, username, password)\nprint('SSH session login successful')\ns.sendline('application stop')\ns.prompt()\nprint('Stopping the app')\nprint(\"\"\"\nStarting the app\"\"\")\ns.sendline('application start')\ns.prompt()\nprint('\\nLogout')\ns.logout()\n",
"step-3": "<mask token>\ns = pxssh.pxssh()\nip = ''\nusername = ''\npassword = ''\ns.login(ip, username, password)\nprint('SSH session login successful')\ns.sendline('application stop')\ns.prompt()\nprint('Stopping the app')\nprint(\"\"\"\nStarting the app\"\"\")\ns.sendline('application start')\ns.prompt()\nprint('\\nLogout')\ns.logout()\n",
"step-4": "from pexpect import pxssh\nimport time\ns = pxssh.pxssh()\nip = ''\nusername = ''\npassword = ''\ns.login(ip, username, password)\nprint('SSH session login successful')\ns.sendline('application stop')\ns.prompt()\nprint('Stopping the app')\nprint(\"\"\"\nStarting the app\"\"\")\ns.sendline('application start')\ns.prompt()\nprint('\\nLogout')\ns.logout()\n",
"step-5": "#!/usr/bin/env python3\n\nfrom pexpect import pxssh\nimport time\ns = pxssh.pxssh()\nip = \"\" #replace ip address\nusername= \"\" #replace username\npassword= \"\" #replace password\ns.login (ip, username, password)\nprint (\"SSH session login successful\")\ns.sendline ('application stop')\ns.prompt() # match the prompt\nprint(\"Stopping the app\")\n\nprint(\"\\nStarting the app\") \ns.sendline ('application start')\ns.prompt() \nprint (\"\\nLogout\")\ns.logout()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import datetime
from django.db import models
from django.utils import timezone
class Acoount(models.Model):
first_name = models.CharField("Ім\'я", max_length=50)
last_name = models.CharField('Прізвище', max_length=50)
username = models.CharField('Псевдонім', max_length=50)
email = models.CharField('Електронна почта', max_length=16)
password = models.CharField('Пароль', max_length=16)
def __str__(self):
return self.first_name + ' ' + self.last_name
class Meta:
verbose_name = 'Акаунт'
verbose_name_plural = 'Акаунти'
|
normal
|
{
"blob_id": "18c2fe40b51ad1489d55aa2be068a1c4f381a2a5",
"index": 553,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Acoount(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n verbose_name = 'Акаунт'\n verbose_name_plural = 'Акаунти'\n",
"step-3": "<mask token>\n\n\nclass Acoount(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.first_name + ' ' + self.last_name\n\n\n class Meta:\n verbose_name = 'Акаунт'\n verbose_name_plural = 'Акаунти'\n",
"step-4": "import datetime\nfrom django.db import models\nfrom django.utils import timezone\n\n\nclass Acoount(models.Model):\n first_name = models.CharField(\"Ім'я\", max_length=50)\n last_name = models.CharField('Прізвище', max_length=50)\n username = models.CharField('Псевдонім', max_length=50)\n email = models.CharField('Електронна почта', max_length=16)\n password = models.CharField('Пароль', max_length=16)\n\n def __str__(self):\n return self.first_name + ' ' + self.last_name\n\n\n class Meta:\n verbose_name = 'Акаунт'\n verbose_name_plural = 'Акаунти'\n",
"step-5": "import datetime\nfrom django.db import models\n\nfrom django.utils import timezone\n\n\nclass Acoount(models.Model):\n first_name = models.CharField(\"Ім\\'я\", max_length=50)\n last_name = models.CharField('Прізвище', max_length=50)\n username = models.CharField('Псевдонім', max_length=50)\n email = models.CharField('Електронна почта', max_length=16)\n password = models.CharField('Пароль', max_length=16)\n \n\n\n def __str__(self):\n return self.first_name + ' ' + self.last_name\n\n class Meta:\n verbose_name = 'Акаунт'\n verbose_name_plural = 'Акаунти'",
"step-ids": [
0,
1,
2,
4,
5
]
}
|
[
0,
1,
2,
4,
5
] |
from django.test import TestCase
from recruitmentapp.apps.core.models import Competence
class CompetenceTest(TestCase):
def setUp(self):
self.competence = Competence.objects.create(name='mining')
self.competence.set_current_language('sv')
self.competence.name = 'gruvarbete'
self.competence.save()
def test_translation(self):
competence = Competence.objects.first()
self.assertEqual(competence.name, 'mining')
competence.set_current_language('sv')
self.assertEqual(competence.name, 'gruvarbete')
def test_translation_fallback(self):
competence = Competence.objects.first()
competence.set_current_language('fi')
self.assertEqual(competence.name, 'mining')
|
normal
|
{
"blob_id": "d7b0ff6549d854d21ad1d2d0f5a9e7f75f4ac1d5",
"index": 956,
"step-1": "<mask token>\n\n\nclass CompetenceTest(TestCase):\n <mask token>\n\n def test_translation(self):\n competence = Competence.objects.first()\n self.assertEqual(competence.name, 'mining')\n competence.set_current_language('sv')\n self.assertEqual(competence.name, 'gruvarbete')\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass CompetenceTest(TestCase):\n <mask token>\n\n def test_translation(self):\n competence = Competence.objects.first()\n self.assertEqual(competence.name, 'mining')\n competence.set_current_language('sv')\n self.assertEqual(competence.name, 'gruvarbete')\n\n def test_translation_fallback(self):\n competence = Competence.objects.first()\n competence.set_current_language('fi')\n self.assertEqual(competence.name, 'mining')\n",
"step-3": "<mask token>\n\n\nclass CompetenceTest(TestCase):\n\n def setUp(self):\n self.competence = Competence.objects.create(name='mining')\n self.competence.set_current_language('sv')\n self.competence.name = 'gruvarbete'\n self.competence.save()\n\n def test_translation(self):\n competence = Competence.objects.first()\n self.assertEqual(competence.name, 'mining')\n competence.set_current_language('sv')\n self.assertEqual(competence.name, 'gruvarbete')\n\n def test_translation_fallback(self):\n competence = Competence.objects.first()\n competence.set_current_language('fi')\n self.assertEqual(competence.name, 'mining')\n",
"step-4": "from django.test import TestCase\nfrom recruitmentapp.apps.core.models import Competence\n\n\nclass CompetenceTest(TestCase):\n\n def setUp(self):\n self.competence = Competence.objects.create(name='mining')\n self.competence.set_current_language('sv')\n self.competence.name = 'gruvarbete'\n self.competence.save()\n\n def test_translation(self):\n competence = Competence.objects.first()\n self.assertEqual(competence.name, 'mining')\n competence.set_current_language('sv')\n self.assertEqual(competence.name, 'gruvarbete')\n\n def test_translation_fallback(self):\n competence = Competence.objects.first()\n competence.set_current_language('fi')\n self.assertEqual(competence.name, 'mining')\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@login_required
def post_create(request):
"""
		This makes sure that the form accepts a POST request (of some data) or nothing.
Without this the form would even accept empty data.
"""
form = PostForm(request.POST or None, request.FILES or None)
if request.POST:
if form.is_valid():
instance = form.save(commit=False)
instance.user = request.user
instance.save()
messages.success(request, 'Post created!')
return HttpResponseRedirect(instance.get_absolute_url())
else:
messages.error(request, 'Sorry! Something went wrong.',
extra_tags='')
context = {'title': 'Create Post', 'form': form}
return render(request, 'post/create.html', context)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@login_required
def post_create(request):
"""
		This makes sure that the form accepts a POST request (of some data) or nothing.
Without this the form would even accept empty data.
"""
form = PostForm(request.POST or None, request.FILES or None)
if request.POST:
if form.is_valid():
instance = form.save(commit=False)
instance.user = request.user
instance.save()
messages.success(request, 'Post created!')
return HttpResponseRedirect(instance.get_absolute_url())
else:
messages.error(request, 'Sorry! Something went wrong.',
extra_tags='')
context = {'title': 'Create Post', 'form': form}
return render(request, 'post/create.html', context)
def post_view(request, slug):
instance = get_object_or_404(Post, slug=slug)
context = {'instance': instance}
return render(request, 'post/view.html', context)
<|reserved_special_token_1|>
from django.shortcuts import render
from django.http import HttpResponseRedirect
from .forms import PostForm
from django.contrib.auth.decorators import login_required
from django.shortcuts import get_object_or_404
from .models import Post
from django.contrib import messages
@login_required
def post_create(request):
"""
		This makes sure that the form accepts a POST request (of some data) or nothing.
Without this the form would even accept empty data.
"""
form = PostForm(request.POST or None, request.FILES or None)
if request.POST:
if form.is_valid():
instance = form.save(commit=False)
instance.user = request.user
instance.save()
messages.success(request, 'Post created!')
return HttpResponseRedirect(instance.get_absolute_url())
else:
messages.error(request, 'Sorry! Something went wrong.',
extra_tags='')
context = {'title': 'Create Post', 'form': form}
return render(request, 'post/create.html', context)
def post_view(request, slug):
instance = get_object_or_404(Post, slug=slug)
context = {'instance': instance}
return render(request, 'post/view.html', context)
<|reserved_special_token_1|>
from django.shortcuts import render
from django.http import HttpResponseRedirect
from .forms import PostForm
from django.contrib.auth.decorators import login_required
from django.shortcuts import get_object_or_404
from .models import Post
from django.contrib import messages
# Create your views here.
@login_required
def post_create(request):
"""
		This makes sure that the form accepts a POST request (of some data) or nothing.
Without this the form would even accept empty data.
"""
form = PostForm(request.POST or None, request.FILES or None)
if request.POST:
if form.is_valid():
instance = form.save(commit=False)
instance.user = request.user
instance.save()
messages.success(request, "Post created!")
return HttpResponseRedirect(instance.get_absolute_url())
else:
messages.error(request, "Sorry! Something went wrong.", extra_tags="")
context = {
'title': "Create Post",
'form' : form,
}
return render(request, 'post/create.html', context)
def post_view(request, slug):
instance = get_object_or_404(Post, slug=slug)
context = {
'instance' : instance
}
return render(request, 'post/view.html', context)
|
flexible
|
{
"blob_id": "4a2437d3d6ba549910bc30a67bf391b9bbafd25f",
"index": 6210,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@login_required\ndef post_create(request):\n \"\"\"\n\t\tThis makes sure that the form accpets a POST requests (of some data) or Nothing.\n\t\tWithout this the form would even accept empty data.\n\t\"\"\"\n form = PostForm(request.POST or None, request.FILES or None)\n if request.POST:\n if form.is_valid():\n instance = form.save(commit=False)\n instance.user = request.user\n instance.save()\n messages.success(request, 'Post created!')\n return HttpResponseRedirect(instance.get_absolute_url())\n else:\n messages.error(request, 'Sorry! Something went wrong.',\n extra_tags='')\n context = {'title': 'Create Post', 'form': form}\n return render(request, 'post/create.html', context)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@login_required\ndef post_create(request):\n \"\"\"\n\t\tThis makes sure that the form accpets a POST requests (of some data) or Nothing.\n\t\tWithout this the form would even accept empty data.\n\t\"\"\"\n form = PostForm(request.POST or None, request.FILES or None)\n if request.POST:\n if form.is_valid():\n instance = form.save(commit=False)\n instance.user = request.user\n instance.save()\n messages.success(request, 'Post created!')\n return HttpResponseRedirect(instance.get_absolute_url())\n else:\n messages.error(request, 'Sorry! Something went wrong.',\n extra_tags='')\n context = {'title': 'Create Post', 'form': form}\n return render(request, 'post/create.html', context)\n\n\ndef post_view(request, slug):\n instance = get_object_or_404(Post, slug=slug)\n context = {'instance': instance}\n return render(request, 'post/view.html', context)\n",
"step-4": "from django.shortcuts import render\nfrom django.http import HttpResponseRedirect\nfrom .forms import PostForm\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import get_object_or_404\nfrom .models import Post\nfrom django.contrib import messages\n\n\n@login_required\ndef post_create(request):\n \"\"\"\n\t\tThis makes sure that the form accpets a POST requests (of some data) or Nothing.\n\t\tWithout this the form would even accept empty data.\n\t\"\"\"\n form = PostForm(request.POST or None, request.FILES or None)\n if request.POST:\n if form.is_valid():\n instance = form.save(commit=False)\n instance.user = request.user\n instance.save()\n messages.success(request, 'Post created!')\n return HttpResponseRedirect(instance.get_absolute_url())\n else:\n messages.error(request, 'Sorry! Something went wrong.',\n extra_tags='')\n context = {'title': 'Create Post', 'form': form}\n return render(request, 'post/create.html', context)\n\n\ndef post_view(request, slug):\n instance = get_object_or_404(Post, slug=slug)\n context = {'instance': instance}\n return render(request, 'post/view.html', context)\n",
"step-5": "from django.shortcuts import render\nfrom django.http import HttpResponseRedirect\nfrom .forms import PostForm\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import get_object_or_404\nfrom .models import Post\nfrom django.contrib import messages\n# Create your views here.\n@login_required\ndef post_create(request):\n\t\"\"\"\n\t\tThis makes sure that the form accpets a POST requests (of some data) or Nothing.\n\t\tWithout this the form would even accept empty data.\n\t\"\"\"\n\tform = PostForm(request.POST or None, request.FILES or None)\n\tif request.POST:\n\t\tif form.is_valid():\n\t\t\tinstance = form.save(commit=False)\n\t\t\tinstance.user = request.user\n\t\t\tinstance.save()\n\t\t\tmessages.success(request, \"Post created!\")\n\t\t\treturn HttpResponseRedirect(instance.get_absolute_url())\n\t\telse:\n\t\t\tmessages.error(request, \"Sorry! Something went wrong.\", extra_tags=\"\")\n\tcontext = {\n\t\t'title': \"Create Post\",\n\t\t'form' : form,\n\t}\n\treturn render(request, 'post/create.html', context)\n\n\ndef post_view(request, slug):\n\tinstance = get_object_or_404(Post, slug=slug)\n\n\tcontext = {\n\t\t'instance' : instance\t\n\t}\n\treturn render(request, 'post/view.html', context)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class SWFRect(object):
def __init__(self, xmin, xmax, ymin, ymax):
self.xmin = xmin
self.xmax = xmax
self.ymin = ymin
self.ymax = ymax
def __str__(self):
return 'SWFRect(' + str(self.xmin) + ',' + str(self.xmax) + ',' + str(
self.ymin) + ',' + str(self.ymax) + ')'
<|reserved_special_token_0|>
class SWFTag(object):
def __init__(self, code, length):
self.code = code
self.length = length
self.typeName = tagCodeTranslation.get(self.code, '!UNKNOWN!')
if self.typeName == '!UNKNOWN!':
print('warning: unknown swf tag code: ' + str(self.code))
def isEndTag(self):
return self.typeName == 'End'
def __str__(self):
return 'SWFTag(code=' + str(self.code
) + ' "' + self.typeName + '", length=' + str(self.length) + ')'
class SWFFile(object):
def __init__(self, filepath):
self.filepath = filepath
self.compression = None
self.version = None
self.fileLength = None
self.frameSize = None
self.frameRate = None
self.frameCount = None
self.tags = []
self.chunkSize = 16 * 4096
self.load()
def load(self):
"""loads the swf file at the filepath"""
self.handle = open(self.filepath, 'rb')
self.unpackHeader1()
print('signature:', self.signature)
print('version:', self.version)
print('fileLength:', self.fileLength)
if self.compression != 'none':
self.decompress()
self.unpackHeader2()
print('frameSize:', self.frameSize)
print('frameRate:', self.frameRate)
print('frameCount:', self.frameCount)
self.unpackTags()
for tag in self.tags:
print(tag)
if tag.typeName == '!UNKNOWN!':
print('warning: unknown tag!')
def decompress(self):
"""replaces the handle with a tempfile handle with all content decompressed"""
temp = tempfile.TemporaryFile('w+b')
if self.compression == 'zlib':
decompressor = zlib.decompressobj()
elif self.compression == 'lzma':
decompressor = lzma.LZMADecompressor()
else:
raise Exception('unknown compression algorithm: ' + self.
compression)
chunk = self.handle.read(self.chunkSize)
while len(chunk) > 0:
temp.write(decompressor.decompress(chunk))
chunk = self.handle.read(self.chunkSize)
temp.seek(0)
self.handle = temp
def unpackHeader1(self):
"""unpacks the first 8 bytes of the header and figures out what compression there is"""
header = self.handle.read(8)
signature, self.version, self.fileLength = struct.unpack('<3sBI',
header)
signature = signature.decode('ascii')
if signature == 'FWS':
self.compression = 'none'
elif signature == 'CWS':
self.compression = 'zlib'
elif signature == 'ZWS':
self.compression = 'lzma'
else:
raise SWFFileUnpackingException('unknown file signature: "' +
signature + '"')
self.signature = signature
def unpackHeader2(self):
"""unpacks the rest of the header data that might have been compressed"""
self.frameSize = self.unpackRect()
self.frameRate, self.frameCount = struct.unpack('<HH', self.handle.
read(4))
def unpackRect(self):
data = self.handle.read(1)
size, = bitstruct.unpack('u5', data)
data += self.handle.read(math.ceil((size * 4 - 3) / 8))
xmin, xmax, ymin, ymax = bitstruct.unpack('p5' + ('s' + str(size)) *
4, data)
return SWFRect(xmin, xmax, ymin, ymax)
def unpackTags(self):
sample = self.handle.read(2)
tag = None
while len(sample) > 0:
if tag is not None and tag.isEndTag():
print('warning: swf has tags after an end tag!')
self.handle.seek(-2, os.SEEK_CUR)
tag = self.unpackTag()
self.tags.append(tag)
sample = self.handle.read(2)
def unpackTag(self):
tag = self.unpackTagHeader()
self.handle.read(tag.length)
return tag
def unpackTagHeader(self):
data, = struct.unpack('<H', self.handle.read(2))
tagCode = data >> 6
tagLength = data & 63
if tagLength == 63:
tagLength, = struct.unpack('<I', self.handle.read(4))
return SWFTag(tagCode, tagLength)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SWFFileUnpackingException(Exception):
<|reserved_special_token_0|>
class SWFRect(object):
def __init__(self, xmin, xmax, ymin, ymax):
self.xmin = xmin
self.xmax = xmax
self.ymin = ymin
self.ymax = ymax
def __str__(self):
return 'SWFRect(' + str(self.xmin) + ',' + str(self.xmax) + ',' + str(
self.ymin) + ',' + str(self.ymax) + ')'
<|reserved_special_token_0|>
class SWFTag(object):
def __init__(self, code, length):
self.code = code
self.length = length
self.typeName = tagCodeTranslation.get(self.code, '!UNKNOWN!')
if self.typeName == '!UNKNOWN!':
print('warning: unknown swf tag code: ' + str(self.code))
def isEndTag(self):
return self.typeName == 'End'
def __str__(self):
return 'SWFTag(code=' + str(self.code
) + ' "' + self.typeName + '", length=' + str(self.length) + ')'
class SWFFile(object):
def __init__(self, filepath):
self.filepath = filepath
self.compression = None
self.version = None
self.fileLength = None
self.frameSize = None
self.frameRate = None
self.frameCount = None
self.tags = []
self.chunkSize = 16 * 4096
self.load()
def load(self):
"""loads the swf file at the filepath"""
self.handle = open(self.filepath, 'rb')
self.unpackHeader1()
print('signature:', self.signature)
print('version:', self.version)
print('fileLength:', self.fileLength)
if self.compression != 'none':
self.decompress()
self.unpackHeader2()
print('frameSize:', self.frameSize)
print('frameRate:', self.frameRate)
print('frameCount:', self.frameCount)
self.unpackTags()
for tag in self.tags:
print(tag)
if tag.typeName == '!UNKNOWN!':
print('warning: unknown tag!')
def decompress(self):
"""replaces the handle with a tempfile handle with all content decompressed"""
temp = tempfile.TemporaryFile('w+b')
if self.compression == 'zlib':
decompressor = zlib.decompressobj()
elif self.compression == 'lzma':
decompressor = lzma.LZMADecompressor()
else:
raise Exception('unknown compression algorithm: ' + self.
compression)
chunk = self.handle.read(self.chunkSize)
while len(chunk) > 0:
temp.write(decompressor.decompress(chunk))
chunk = self.handle.read(self.chunkSize)
temp.seek(0)
self.handle = temp
def unpackHeader1(self):
"""unpacks the first 8 bytes of the header and figures out what compression there is"""
header = self.handle.read(8)
signature, self.version, self.fileLength = struct.unpack('<3sBI',
header)
signature = signature.decode('ascii')
if signature == 'FWS':
self.compression = 'none'
elif signature == 'CWS':
self.compression = 'zlib'
elif signature == 'ZWS':
self.compression = 'lzma'
else:
raise SWFFileUnpackingException('unknown file signature: "' +
signature + '"')
self.signature = signature
def unpackHeader2(self):
"""unpacks the rest of the header data that might have been compressed"""
self.frameSize = self.unpackRect()
self.frameRate, self.frameCount = struct.unpack('<HH', self.handle.
read(4))
def unpackRect(self):
data = self.handle.read(1)
size, = bitstruct.unpack('u5', data)
data += self.handle.read(math.ceil((size * 4 - 3) / 8))
xmin, xmax, ymin, ymax = bitstruct.unpack('p5' + ('s' + str(size)) *
4, data)
return SWFRect(xmin, xmax, ymin, ymax)
def unpackTags(self):
sample = self.handle.read(2)
tag = None
while len(sample) > 0:
if tag is not None and tag.isEndTag():
print('warning: swf has tags after an end tag!')
self.handle.seek(-2, os.SEEK_CUR)
tag = self.unpackTag()
self.tags.append(tag)
sample = self.handle.read(2)
def unpackTag(self):
tag = self.unpackTagHeader()
self.handle.read(tag.length)
return tag
def unpackTagHeader(self):
data, = struct.unpack('<H', self.handle.read(2))
tagCode = data >> 6
tagLength = data & 63
if tagLength == 63:
tagLength, = struct.unpack('<I', self.handle.read(4))
return SWFTag(tagCode, tagLength)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SWFFileUnpackingException(Exception):
"""generic exception during unpacking of a swf file typically due to incorrect structure or unexpected values"""
class SWFRect(object):
def __init__(self, xmin, xmax, ymin, ymax):
self.xmin = xmin
self.xmax = xmax
self.ymin = ymin
self.ymax = ymax
def __str__(self):
return 'SWFRect(' + str(self.xmin) + ',' + str(self.xmax) + ',' + str(
self.ymin) + ',' + str(self.ymax) + ')'
<|reserved_special_token_0|>
class SWFTag(object):
def __init__(self, code, length):
self.code = code
self.length = length
self.typeName = tagCodeTranslation.get(self.code, '!UNKNOWN!')
if self.typeName == '!UNKNOWN!':
print('warning: unknown swf tag code: ' + str(self.code))
def isEndTag(self):
return self.typeName == 'End'
def __str__(self):
return 'SWFTag(code=' + str(self.code
) + ' "' + self.typeName + '", length=' + str(self.length) + ')'
class SWFFile(object):
def __init__(self, filepath):
self.filepath = filepath
self.compression = None
self.version = None
self.fileLength = None
self.frameSize = None
self.frameRate = None
self.frameCount = None
self.tags = []
self.chunkSize = 16 * 4096
self.load()
def load(self):
"""loads the swf file at the filepath"""
self.handle = open(self.filepath, 'rb')
self.unpackHeader1()
print('signature:', self.signature)
print('version:', self.version)
print('fileLength:', self.fileLength)
if self.compression != 'none':
self.decompress()
self.unpackHeader2()
print('frameSize:', self.frameSize)
print('frameRate:', self.frameRate)
print('frameCount:', self.frameCount)
self.unpackTags()
for tag in self.tags:
print(tag)
if tag.typeName == '!UNKNOWN!':
print('warning: unknown tag!')
def decompress(self):
"""replaces the handle with a tempfile handle with all content decompressed"""
temp = tempfile.TemporaryFile('w+b')
if self.compression == 'zlib':
decompressor = zlib.decompressobj()
elif self.compression == 'lzma':
decompressor = lzma.LZMADecompressor()
else:
raise Exception('unknown compression algorithm: ' + self.
compression)
chunk = self.handle.read(self.chunkSize)
while len(chunk) > 0:
temp.write(decompressor.decompress(chunk))
chunk = self.handle.read(self.chunkSize)
temp.seek(0)
self.handle = temp
def unpackHeader1(self):
"""unpacks the first 8 bytes of the header and figures out what compression there is"""
header = self.handle.read(8)
signature, self.version, self.fileLength = struct.unpack('<3sBI',
header)
signature = signature.decode('ascii')
if signature == 'FWS':
self.compression = 'none'
elif signature == 'CWS':
self.compression = 'zlib'
elif signature == 'ZWS':
self.compression = 'lzma'
else:
raise SWFFileUnpackingException('unknown file signature: "' +
signature + '"')
self.signature = signature
def unpackHeader2(self):
"""unpacks the rest of the header data that might have been compressed"""
self.frameSize = self.unpackRect()
self.frameRate, self.frameCount = struct.unpack('<HH', self.handle.
read(4))
def unpackRect(self):
data = self.handle.read(1)
size, = bitstruct.unpack('u5', data)
data += self.handle.read(math.ceil((size * 4 - 3) / 8))
xmin, xmax, ymin, ymax = bitstruct.unpack('p5' + ('s' + str(size)) *
4, data)
return SWFRect(xmin, xmax, ymin, ymax)
def unpackTags(self):
sample = self.handle.read(2)
tag = None
while len(sample) > 0:
if tag is not None and tag.isEndTag():
print('warning: swf has tags after an end tag!')
self.handle.seek(-2, os.SEEK_CUR)
tag = self.unpackTag()
self.tags.append(tag)
sample = self.handle.read(2)
def unpackTag(self):
tag = self.unpackTagHeader()
self.handle.read(tag.length)
return tag
def unpackTagHeader(self):
data, = struct.unpack('<H', self.handle.read(2))
tagCode = data >> 6
tagLength = data & 63
if tagLength == 63:
tagLength, = struct.unpack('<I', self.handle.read(4))
return SWFTag(tagCode, tagLength)
def main():
if len(sys.argv) < 2:
print('filepath required')
else:
file = SWFFile(sys.argv[1])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SWFFileUnpackingException(Exception):
"""generic exception during unpacking of a swf file typically due to incorrect structure or unexpected values"""
class SWFRect(object):
def __init__(self, xmin, xmax, ymin, ymax):
self.xmin = xmin
self.xmax = xmax
self.ymin = ymin
self.ymax = ymax
def __str__(self):
return 'SWFRect(' + str(self.xmin) + ',' + str(self.xmax) + ',' + str(
self.ymin) + ',' + str(self.ymax) + ')'
tagCodeTranslation = {(0): 'End', (1): 'ShowFrame', (2): 'DefineShape', (4):
'PlaceObject', (5): 'RemoveObject', (6): 'DefineBits', (7):
'DefineButton', (8): 'JPEGTables', (9): 'SetBackgroundColor', (10):
'DefineFont', (11): 'DefineText', (12): 'DoAction', (13):
'DefineFontInfo', (14): 'DefineSound', (15): 'StartSound', (17):
'DefineButtonSound', (18): 'SoundStreamHead', (19): 'SoundStreamBlock',
(20): 'DefineBitsLossless', (21): 'DefineBitsJPEG2', (22):
'DefineShape2', (23): 'DefineButtonCxform', (24): 'Protect', (26):
'PlaceObject2', (28): 'RemoveObject2', (32): 'DefineShape3', (33):
'DefineText2', (34): 'DefineButton2', (35): 'DefineBitsJPEG3', (36):
'DefineBitsLossless2', (37): 'DefineEditText', (39): 'DefineSprite', (
41): 'ProductInfo', (43): 'FrameLabel', (45): 'SoundStreamHead2', (46):
'DefineMorphShape', (48): 'DefineFont2', (56): 'ExportAssets', (57):
'ImportAssets', (58): 'EnableDebugger', (59): 'DoInitAction', (60):
'DefineVideoStream', (61): 'VideoFrame', (62): 'DefineFontInfo2', (63):
'DebugID', (64): 'EnableDebugger2', (65): 'ScriptLimits', (66):
'SetTabIndex', (69): 'FileAttributes', (70): 'PlaceObject3', (71):
'ImportAssets2', (73): 'DefineFontAlignZones', (74): 'CSMTextSettings',
(75): 'DefineFont3', (76): 'SymbolClass', (77): 'Metadata', (78):
'DefineScalingGrid', (82): 'DoABC', (83): 'DefineShape4', (84):
'DefineMorphShape2', (86): 'DefineSceneAndFrameLabelData', (87):
'DefineBinaryData', (88): 'DefineFontName', (89): 'StartSound2', (90):
'DefineBitsJPEG4', (91): 'DefineFont4', (93): 'EnableTelemetry'}
class SWFTag(object):
def __init__(self, code, length):
self.code = code
self.length = length
self.typeName = tagCodeTranslation.get(self.code, '!UNKNOWN!')
if self.typeName == '!UNKNOWN!':
print('warning: unknown swf tag code: ' + str(self.code))
def isEndTag(self):
return self.typeName == 'End'
def __str__(self):
return 'SWFTag(code=' + str(self.code
) + ' "' + self.typeName + '", length=' + str(self.length) + ')'
class SWFFile(object):
def __init__(self, filepath):
self.filepath = filepath
self.compression = None
self.version = None
self.fileLength = None
self.frameSize = None
self.frameRate = None
self.frameCount = None
self.tags = []
self.chunkSize = 16 * 4096
self.load()
def load(self):
"""loads the swf file at the filepath"""
self.handle = open(self.filepath, 'rb')
self.unpackHeader1()
print('signature:', self.signature)
print('version:', self.version)
print('fileLength:', self.fileLength)
if self.compression != 'none':
self.decompress()
self.unpackHeader2()
print('frameSize:', self.frameSize)
print('frameRate:', self.frameRate)
print('frameCount:', self.frameCount)
self.unpackTags()
for tag in self.tags:
print(tag)
if tag.typeName == '!UNKNOWN!':
print('warning: unknown tag!')
def decompress(self):
"""replaces the handle with a tempfile handle with all content decompressed"""
temp = tempfile.TemporaryFile('w+b')
if self.compression == 'zlib':
decompressor = zlib.decompressobj()
elif self.compression == 'lzma':
decompressor = lzma.LZMADecompressor()
else:
raise Exception('unknown compression algorithm: ' + self.
compression)
chunk = self.handle.read(self.chunkSize)
while len(chunk) > 0:
temp.write(decompressor.decompress(chunk))
chunk = self.handle.read(self.chunkSize)
temp.seek(0)
self.handle = temp
def unpackHeader1(self):
"""unpacks the first 8 bytes of the header and figures out what compression there is"""
header = self.handle.read(8)
signature, self.version, self.fileLength = struct.unpack('<3sBI',
header)
signature = signature.decode('ascii')
if signature == 'FWS':
self.compression = 'none'
elif signature == 'CWS':
self.compression = 'zlib'
elif signature == 'ZWS':
self.compression = 'lzma'
else:
raise SWFFileUnpackingException('unknown file signature: "' +
signature + '"')
self.signature = signature
def unpackHeader2(self):
"""unpacks the rest of the header data that might have been compressed"""
self.frameSize = self.unpackRect()
self.frameRate, self.frameCount = struct.unpack('<HH', self.handle.
read(4))
def unpackRect(self):
data = self.handle.read(1)
size, = bitstruct.unpack('u5', data)
data += self.handle.read(math.ceil((size * 4 - 3) / 8))
xmin, xmax, ymin, ymax = bitstruct.unpack('p5' + ('s' + str(size)) *
4, data)
return SWFRect(xmin, xmax, ymin, ymax)
def unpackTags(self):
sample = self.handle.read(2)
tag = None
while len(sample) > 0:
if tag is not None and tag.isEndTag():
print('warning: swf has tags after an end tag!')
self.handle.seek(-2, os.SEEK_CUR)
tag = self.unpackTag()
self.tags.append(tag)
sample = self.handle.read(2)
def unpackTag(self):
tag = self.unpackTagHeader()
self.handle.read(tag.length)
return tag
def unpackTagHeader(self):
data, = struct.unpack('<H', self.handle.read(2))
tagCode = data >> 6
tagLength = data & 63
if tagLength == 63:
tagLength, = struct.unpack('<I', self.handle.read(4))
return SWFTag(tagCode, tagLength)
def main():
if len(sys.argv) < 2:
print('filepath required')
else:
file = SWFFile(sys.argv[1])
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
#!/usr/bin/env python3
import sys
import os
import math
import tempfile
import zlib
import lzma
import struct
import bitstruct
# a swf file unpacker and analyzer
# majority of information taken from https://www.adobe.com/devnet/swf.html (version 19)
# some additional information taken from https://github.com/claus/as3swf/wiki/SWF-tag-support-chart
class SWFFileUnpackingException(Exception):
'''generic exception during unpacking of a swf file typically due to incorrect structure or unexpected values'''
class SWFRect(object):
def __init__(self, xmin, xmax, ymin, ymax):
self.xmin = xmin
self.xmax = xmax
self.ymin = ymin
self.ymax = ymax
def __str__(self):
return 'SWFRect('+str(self.xmin)+','+str(self.xmax)+','+str(self.ymin)+','+str(self.ymax)+')'
tagCodeTranslation = {
0:'End',
1:'ShowFrame',
2:'DefineShape',
4:'PlaceObject',
5:'RemoveObject',
6:'DefineBits',
7:'DefineButton',
8:'JPEGTables',
9:'SetBackgroundColor',
10:'DefineFont',
11:'DefineText',
12:'DoAction',
13:'DefineFontInfo',
14:'DefineSound',
15:'StartSound',
17:'DefineButtonSound',
18:'SoundStreamHead',
19:'SoundStreamBlock',
20:'DefineBitsLossless',
21:'DefineBitsJPEG2',
22:'DefineShape2',
23:'DefineButtonCxform',
24:'Protect',
26:'PlaceObject2',
28:'RemoveObject2',
32:'DefineShape3',
33:'DefineText2',
34:'DefineButton2',
35:'DefineBitsJPEG3',
36:'DefineBitsLossless2',
37:'DefineEditText',
39:'DefineSprite',
41:'ProductInfo', # taken from https://github.com/claus/as3swf/wiki/SWF-tag-support-chart
43:'FrameLabel',
45:'SoundStreamHead2',
46:'DefineMorphShape',
48:'DefineFont2',
56:'ExportAssets',
57:'ImportAssets',
58:'EnableDebugger',
59:'DoInitAction',
60:'DefineVideoStream',
61:'VideoFrame',
62:'DefineFontInfo2',
63:'DebugID', # taken from https://github.com/claus/as3swf/wiki/SWF-tag-support-chart
64:'EnableDebugger2',
65:'ScriptLimits',
66:'SetTabIndex',
69:'FileAttributes',
70:'PlaceObject3',
71:'ImportAssets2',
73:'DefineFontAlignZones',
74:'CSMTextSettings',
75:'DefineFont3',
76:'SymbolClass',
77:'Metadata',
78:'DefineScalingGrid',
82:'DoABC',
83:'DefineShape4',
84:'DefineMorphShape2',
86:'DefineSceneAndFrameLabelData',
87:'DefineBinaryData',
88:'DefineFontName',
89:'StartSound2',
90:'DefineBitsJPEG4',
91:'DefineFont4',
93:'EnableTelemetry',
}
class SWFTag(object):
def __init__(self, code, length):
self.code = code
self.length = length
self.typeName = tagCodeTranslation.get(self.code, '!UNKNOWN!')
if self.typeName == '!UNKNOWN!':
print('warning: unknown swf tag code: '+str(self.code))
def isEndTag(self):
return self.typeName == 'End'
def __str__(self):
return 'SWFTag(code='+str(self.code)+' "'+self.typeName+'", length='+str(self.length)+')'
class SWFFile(object):
def __init__(self, filepath):
self.filepath = filepath
self.compression = None
self.version = None
self.fileLength = None
self.frameSize = None
self.frameRate = None
self.frameCount = None
self.tags = []
self.chunkSize = 16 * 4096
self.load()
def load(self):
'''loads the swf file at the filepath'''
self.handle = open(self.filepath, 'rb')
self.unpackHeader1()
print('signature:', self.signature)
print('version:', self.version)
print('fileLength:', self.fileLength)
if self.compression != 'none':
self.decompress()
self.unpackHeader2()
print('frameSize:', self.frameSize)
print('frameRate:', self.frameRate)
print('frameCount:', self.frameCount)
self.unpackTags()
for tag in self.tags:
print(tag)
if tag.typeName == '!UNKNOWN!':
print('warning: unknown tag!')
def decompress(self):
'''replaces the handle with a tempfile handle with all content decompressed'''
temp = tempfile.TemporaryFile('w+b')
if self.compression == 'zlib':
decompressor = zlib.decompressobj()
elif self.compression == 'lzma':
decompressor = lzma.LZMADecompressor()
else:
raise Exception("unknown compression algorithm: "+self.compression)
chunk = self.handle.read(self.chunkSize)
while len(chunk) > 0:
temp.write(decompressor.decompress(chunk))
chunk = self.handle.read(self.chunkSize)
temp.seek(0)
self.handle = temp
def unpackHeader1(self):
'''unpacks the first 8 bytes of the header and figures out what compression there is'''
header = self.handle.read(8)
signature, self.version, self.fileLength = struct.unpack('<3sBI', header)
signature = signature.decode('ascii')
if signature == 'FWS':
self.compression = 'none'
elif signature == 'CWS':
self.compression = 'zlib'
elif signature == 'ZWS':
self.compression = 'lzma'
else:
raise SWFFileUnpackingException('unknown file signature: "'+signature+'"')
self.signature = signature
def unpackHeader2(self):
'''unpacks the rest of the header data that might have been compressed'''
self.frameSize = self.unpackRect()
self.frameRate, self.frameCount = struct.unpack('<HH', self.handle.read(4))
# frameRate is an 8.8 float actually, but i'm not sure how to unpack that...
def unpackRect(self):
data = self.handle.read(1)
size, = bitstruct.unpack('u5', data)
data += self.handle.read(math.ceil((size * 4 - 3) / 8))
xmin, xmax, ymin, ymax = bitstruct.unpack('p5'+('s'+str(size))*4, data)
return SWFRect(xmin, xmax, ymin, ymax)
def unpackTags(self):
sample = self.handle.read(2)
tag = None
while len(sample) > 0:
if tag is not None and tag.isEndTag():
print('warning: swf has tags after an end tag!')
self.handle.seek(-2, os.SEEK_CUR)
tag = self.unpackTag()
self.tags.append(tag)
sample = self.handle.read(2)
def unpackTag(self):
tag = self.unpackTagHeader()
self.handle.read(tag.length)
return tag
def unpackTagHeader(self):
data, = struct.unpack('<H', self.handle.read(2))
tagCode = data >> 6
tagLength = data & 0x3f
if tagLength == 0x3f:
tagLength, = struct.unpack('<I', self.handle.read(4))
return SWFTag(tagCode, tagLength)
def main():
if len(sys.argv) < 2:
print('filepath required')
else:
file = SWFFile(sys.argv[1])
if __name__ == '__main__':
main()
|
flexible
|
{
"blob_id": "4556febd5fddf390f370a8e24871eacf08d34c9f",
"index": 7087,
"step-1": "<mask token>\n\n\nclass SWFRect(object):\n\n def __init__(self, xmin, xmax, ymin, ymax):\n self.xmin = xmin\n self.xmax = xmax\n self.ymin = ymin\n self.ymax = ymax\n\n def __str__(self):\n return 'SWFRect(' + str(self.xmin) + ',' + str(self.xmax) + ',' + str(\n self.ymin) + ',' + str(self.ymax) + ')'\n\n\n<mask token>\n\n\nclass SWFTag(object):\n\n def __init__(self, code, length):\n self.code = code\n self.length = length\n self.typeName = tagCodeTranslation.get(self.code, '!UNKNOWN!')\n if self.typeName == '!UNKNOWN!':\n print('warning: unknown swf tag code: ' + str(self.code))\n\n def isEndTag(self):\n return self.typeName == 'End'\n\n def __str__(self):\n return 'SWFTag(code=' + str(self.code\n ) + ' \"' + self.typeName + '\", length=' + str(self.length) + ')'\n\n\nclass SWFFile(object):\n\n def __init__(self, filepath):\n self.filepath = filepath\n self.compression = None\n self.version = None\n self.fileLength = None\n self.frameSize = None\n self.frameRate = None\n self.frameCount = None\n self.tags = []\n self.chunkSize = 16 * 4096\n self.load()\n\n def load(self):\n \"\"\"loads the swf file at the filepath\"\"\"\n self.handle = open(self.filepath, 'rb')\n self.unpackHeader1()\n print('signature:', self.signature)\n print('version:', self.version)\n print('fileLength:', self.fileLength)\n if self.compression != 'none':\n self.decompress()\n self.unpackHeader2()\n print('frameSize:', self.frameSize)\n print('frameRate:', self.frameRate)\n print('frameCount:', self.frameCount)\n self.unpackTags()\n for tag in self.tags:\n print(tag)\n if tag.typeName == '!UNKNOWN!':\n print('warning: unknown tag!')\n\n def decompress(self):\n \"\"\"replaces the handle with a tempfile handle with all content decompressed\"\"\"\n temp = tempfile.TemporaryFile('w+b')\n if self.compression == 'zlib':\n decompressor = zlib.decompressobj()\n elif self.compression == 'lzma':\n decompressor = lzma.LZMADecompressor()\n else:\n raise Exception('unknown compression algorithm: ' + self.\n compression)\n chunk = self.handle.read(self.chunkSize)\n while len(chunk) > 0:\n temp.write(decompressor.decompress(chunk))\n chunk = self.handle.read(self.chunkSize)\n temp.seek(0)\n self.handle = temp\n\n def unpackHeader1(self):\n \"\"\"unpacks the first 8 bytes of the header and figures out what compression there is\"\"\"\n header = self.handle.read(8)\n signature, self.version, self.fileLength = struct.unpack('<3sBI',\n header)\n signature = signature.decode('ascii')\n if signature == 'FWS':\n self.compression = 'none'\n elif signature == 'CWS':\n self.compression = 'zlib'\n elif signature == 'ZWS':\n self.compression = 'lzma'\n else:\n raise SWFFileUnpackingException('unknown file signature: \"' +\n signature + '\"')\n self.signature = signature\n\n def unpackHeader2(self):\n \"\"\"unpacks the rest of the header data that might have been compressed\"\"\"\n self.frameSize = self.unpackRect()\n self.frameRate, self.frameCount = struct.unpack('<HH', self.handle.\n read(4))\n\n def unpackRect(self):\n data = self.handle.read(1)\n size, = bitstruct.unpack('u5', data)\n data += self.handle.read(math.ceil((size * 4 - 3) / 8))\n xmin, xmax, ymin, ymax = bitstruct.unpack('p5' + ('s' + str(size)) *\n 4, data)\n return SWFRect(xmin, xmax, ymin, ymax)\n\n def unpackTags(self):\n sample = self.handle.read(2)\n tag = None\n while len(sample) > 0:\n if tag is not None and tag.isEndTag():\n print('warning: swf has tags after an end tag!')\n self.handle.seek(-2, os.SEEK_CUR)\n tag = self.unpackTag()\n self.tags.append(tag)\n 
sample = self.handle.read(2)\n\n def unpackTag(self):\n tag = self.unpackTagHeader()\n self.handle.read(tag.length)\n return tag\n\n def unpackTagHeader(self):\n data, = struct.unpack('<H', self.handle.read(2))\n tagCode = data >> 6\n tagLength = data & 63\n if tagLength == 63:\n tagLength, = struct.unpack('<I', self.handle.read(4))\n return SWFTag(tagCode, tagLength)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SWFFileUnpackingException(Exception):\n <mask token>\n\n\nclass SWFRect(object):\n\n def __init__(self, xmin, xmax, ymin, ymax):\n self.xmin = xmin\n self.xmax = xmax\n self.ymin = ymin\n self.ymax = ymax\n\n def __str__(self):\n return 'SWFRect(' + str(self.xmin) + ',' + str(self.xmax) + ',' + str(\n self.ymin) + ',' + str(self.ymax) + ')'\n\n\n<mask token>\n\n\nclass SWFTag(object):\n\n def __init__(self, code, length):\n self.code = code\n self.length = length\n self.typeName = tagCodeTranslation.get(self.code, '!UNKNOWN!')\n if self.typeName == '!UNKNOWN!':\n print('warning: unknown swf tag code: ' + str(self.code))\n\n def isEndTag(self):\n return self.typeName == 'End'\n\n def __str__(self):\n return 'SWFTag(code=' + str(self.code\n ) + ' \"' + self.typeName + '\", length=' + str(self.length) + ')'\n\n\nclass SWFFile(object):\n\n def __init__(self, filepath):\n self.filepath = filepath\n self.compression = None\n self.version = None\n self.fileLength = None\n self.frameSize = None\n self.frameRate = None\n self.frameCount = None\n self.tags = []\n self.chunkSize = 16 * 4096\n self.load()\n\n def load(self):\n \"\"\"loads the swf file at the filepath\"\"\"\n self.handle = open(self.filepath, 'rb')\n self.unpackHeader1()\n print('signature:', self.signature)\n print('version:', self.version)\n print('fileLength:', self.fileLength)\n if self.compression != 'none':\n self.decompress()\n self.unpackHeader2()\n print('frameSize:', self.frameSize)\n print('frameRate:', self.frameRate)\n print('frameCount:', self.frameCount)\n self.unpackTags()\n for tag in self.tags:\n print(tag)\n if tag.typeName == '!UNKNOWN!':\n print('warning: unknown tag!')\n\n def decompress(self):\n \"\"\"replaces the handle with a tempfile handle with all content decompressed\"\"\"\n temp = tempfile.TemporaryFile('w+b')\n if self.compression == 'zlib':\n decompressor = zlib.decompressobj()\n elif self.compression == 'lzma':\n decompressor = lzma.LZMADecompressor()\n else:\n raise Exception('unknown compression algorithm: ' + self.\n compression)\n chunk = self.handle.read(self.chunkSize)\n while len(chunk) > 0:\n temp.write(decompressor.decompress(chunk))\n chunk = self.handle.read(self.chunkSize)\n temp.seek(0)\n self.handle = temp\n\n def unpackHeader1(self):\n \"\"\"unpacks the first 8 bytes of the header and figures out what compression there is\"\"\"\n header = self.handle.read(8)\n signature, self.version, self.fileLength = struct.unpack('<3sBI',\n header)\n signature = signature.decode('ascii')\n if signature == 'FWS':\n self.compression = 'none'\n elif signature == 'CWS':\n self.compression = 'zlib'\n elif signature == 'ZWS':\n self.compression = 'lzma'\n else:\n raise SWFFileUnpackingException('unknown file signature: \"' +\n signature + '\"')\n self.signature = signature\n\n def unpackHeader2(self):\n \"\"\"unpacks the rest of the header data that might have been compressed\"\"\"\n self.frameSize = self.unpackRect()\n self.frameRate, self.frameCount = struct.unpack('<HH', self.handle.\n read(4))\n\n def unpackRect(self):\n data = self.handle.read(1)\n size, = bitstruct.unpack('u5', data)\n data += self.handle.read(math.ceil((size * 4 - 3) / 8))\n xmin, xmax, ymin, ymax = bitstruct.unpack('p5' + ('s' + str(size)) *\n 4, data)\n return SWFRect(xmin, xmax, ymin, ymax)\n\n def unpackTags(self):\n sample = self.handle.read(2)\n tag = None\n while len(sample) > 0:\n if tag is not None and tag.isEndTag():\n print('warning: swf has tags after an end tag!')\n self.handle.seek(-2, 
os.SEEK_CUR)\n tag = self.unpackTag()\n self.tags.append(tag)\n sample = self.handle.read(2)\n\n def unpackTag(self):\n tag = self.unpackTagHeader()\n self.handle.read(tag.length)\n return tag\n\n def unpackTagHeader(self):\n data, = struct.unpack('<H', self.handle.read(2))\n tagCode = data >> 6\n tagLength = data & 63\n if tagLength == 63:\n tagLength, = struct.unpack('<I', self.handle.read(4))\n return SWFTag(tagCode, tagLength)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass SWFFileUnpackingException(Exception):\n \"\"\"generic exception during unpacking of a swf file typically due to incorrect structure or unexpected values\"\"\"\n\n\nclass SWFRect(object):\n\n def __init__(self, xmin, xmax, ymin, ymax):\n self.xmin = xmin\n self.xmax = xmax\n self.ymin = ymin\n self.ymax = ymax\n\n def __str__(self):\n return 'SWFRect(' + str(self.xmin) + ',' + str(self.xmax) + ',' + str(\n self.ymin) + ',' + str(self.ymax) + ')'\n\n\n<mask token>\n\n\nclass SWFTag(object):\n\n def __init__(self, code, length):\n self.code = code\n self.length = length\n self.typeName = tagCodeTranslation.get(self.code, '!UNKNOWN!')\n if self.typeName == '!UNKNOWN!':\n print('warning: unknown swf tag code: ' + str(self.code))\n\n def isEndTag(self):\n return self.typeName == 'End'\n\n def __str__(self):\n return 'SWFTag(code=' + str(self.code\n ) + ' \"' + self.typeName + '\", length=' + str(self.length) + ')'\n\n\nclass SWFFile(object):\n\n def __init__(self, filepath):\n self.filepath = filepath\n self.compression = None\n self.version = None\n self.fileLength = None\n self.frameSize = None\n self.frameRate = None\n self.frameCount = None\n self.tags = []\n self.chunkSize = 16 * 4096\n self.load()\n\n def load(self):\n \"\"\"loads the swf file at the filepath\"\"\"\n self.handle = open(self.filepath, 'rb')\n self.unpackHeader1()\n print('signature:', self.signature)\n print('version:', self.version)\n print('fileLength:', self.fileLength)\n if self.compression != 'none':\n self.decompress()\n self.unpackHeader2()\n print('frameSize:', self.frameSize)\n print('frameRate:', self.frameRate)\n print('frameCount:', self.frameCount)\n self.unpackTags()\n for tag in self.tags:\n print(tag)\n if tag.typeName == '!UNKNOWN!':\n print('warning: unknown tag!')\n\n def decompress(self):\n \"\"\"replaces the handle with a tempfile handle with all content decompressed\"\"\"\n temp = tempfile.TemporaryFile('w+b')\n if self.compression == 'zlib':\n decompressor = zlib.decompressobj()\n elif self.compression == 'lzma':\n decompressor = lzma.LZMADecompressor()\n else:\n raise Exception('unknown compression algorithm: ' + self.\n compression)\n chunk = self.handle.read(self.chunkSize)\n while len(chunk) > 0:\n temp.write(decompressor.decompress(chunk))\n chunk = self.handle.read(self.chunkSize)\n temp.seek(0)\n self.handle = temp\n\n def unpackHeader1(self):\n \"\"\"unpacks the first 8 bytes of the header and figures out what compression there is\"\"\"\n header = self.handle.read(8)\n signature, self.version, self.fileLength = struct.unpack('<3sBI',\n header)\n signature = signature.decode('ascii')\n if signature == 'FWS':\n self.compression = 'none'\n elif signature == 'CWS':\n self.compression = 'zlib'\n elif signature == 'ZWS':\n self.compression = 'lzma'\n else:\n raise SWFFileUnpackingException('unknown file signature: \"' +\n signature + '\"')\n self.signature = signature\n\n def unpackHeader2(self):\n \"\"\"unpacks the rest of the header data that might have been compressed\"\"\"\n self.frameSize = self.unpackRect()\n self.frameRate, self.frameCount = struct.unpack('<HH', self.handle.\n read(4))\n\n def unpackRect(self):\n data = self.handle.read(1)\n size, = bitstruct.unpack('u5', data)\n data += self.handle.read(math.ceil((size * 4 - 3) / 8))\n xmin, xmax, ymin, ymax = bitstruct.unpack('p5' + ('s' + str(size)) *\n 4, data)\n return SWFRect(xmin, xmax, ymin, ymax)\n\n def unpackTags(self):\n sample = self.handle.read(2)\n tag = None\n while len(sample) > 0:\n if tag 
is not None and tag.isEndTag():\n print('warning: swf has tags after an end tag!')\n self.handle.seek(-2, os.SEEK_CUR)\n tag = self.unpackTag()\n self.tags.append(tag)\n sample = self.handle.read(2)\n\n def unpackTag(self):\n tag = self.unpackTagHeader()\n self.handle.read(tag.length)\n return tag\n\n def unpackTagHeader(self):\n data, = struct.unpack('<H', self.handle.read(2))\n tagCode = data >> 6\n tagLength = data & 63\n if tagLength == 63:\n tagLength, = struct.unpack('<I', self.handle.read(4))\n return SWFTag(tagCode, tagLength)\n\n\ndef main():\n if len(sys.argv) < 2:\n print('filepath required')\n else:\n file = SWFFile(sys.argv[1])\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass SWFFileUnpackingException(Exception):\n \"\"\"generic exception during unpacking of a swf file typically due to incorrect structure or unexpected values\"\"\"\n\n\nclass SWFRect(object):\n\n def __init__(self, xmin, xmax, ymin, ymax):\n self.xmin = xmin\n self.xmax = xmax\n self.ymin = ymin\n self.ymax = ymax\n\n def __str__(self):\n return 'SWFRect(' + str(self.xmin) + ',' + str(self.xmax) + ',' + str(\n self.ymin) + ',' + str(self.ymax) + ')'\n\n\ntagCodeTranslation = {(0): 'End', (1): 'ShowFrame', (2): 'DefineShape', (4):\n 'PlaceObject', (5): 'RemoveObject', (6): 'DefineBits', (7):\n 'DefineButton', (8): 'JPEGTables', (9): 'SetBackgroundColor', (10):\n 'DefineFont', (11): 'DefineText', (12): 'DoAction', (13):\n 'DefineFontInfo', (14): 'DefineSound', (15): 'StartSound', (17):\n 'DefineButtonSound', (18): 'SoundStreamHead', (19): 'SoundStreamBlock',\n (20): 'DefineBitsLossless', (21): 'DefineBitsJPEG2', (22):\n 'DefineShape2', (23): 'DefineButtonCxform', (24): 'Protect', (26):\n 'PlaceObject2', (28): 'RemoveObject2', (32): 'DefineShape3', (33):\n 'DefineText2', (34): 'DefineButton2', (35): 'DefineBitsJPEG3', (36):\n 'DefineBitsLossless2', (37): 'DefineEditText', (39): 'DefineSprite', (\n 41): 'ProductInfo', (43): 'FrameLabel', (45): 'SoundStreamHead2', (46):\n 'DefineMorphShape', (48): 'DefineFont2', (56): 'ExportAssets', (57):\n 'ImportAssets', (58): 'EnableDebugger', (59): 'DoInitAction', (60):\n 'DefineVideoStream', (61): 'VideoFrame', (62): 'DefineFontInfo2', (63):\n 'DebugID', (64): 'EnableDebugger2', (65): 'ScriptLimits', (66):\n 'SetTabIndex', (69): 'FileAttributes', (70): 'PlaceObject3', (71):\n 'ImportAssets2', (73): 'DefineFontAlignZones', (74): 'CSMTextSettings',\n (75): 'DefineFont3', (76): 'SymbolClass', (77): 'Metadata', (78):\n 'DefineScalingGrid', (82): 'DoABC', (83): 'DefineShape4', (84):\n 'DefineMorphShape2', (86): 'DefineSceneAndFrameLabelData', (87):\n 'DefineBinaryData', (88): 'DefineFontName', (89): 'StartSound2', (90):\n 'DefineBitsJPEG4', (91): 'DefineFont4', (93): 'EnableTelemetry'}\n\n\nclass SWFTag(object):\n\n def __init__(self, code, length):\n self.code = code\n self.length = length\n self.typeName = tagCodeTranslation.get(self.code, '!UNKNOWN!')\n if self.typeName == '!UNKNOWN!':\n print('warning: unknown swf tag code: ' + str(self.code))\n\n def isEndTag(self):\n return self.typeName == 'End'\n\n def __str__(self):\n return 'SWFTag(code=' + str(self.code\n ) + ' \"' + self.typeName + '\", length=' + str(self.length) + ')'\n\n\nclass SWFFile(object):\n\n def __init__(self, filepath):\n self.filepath = filepath\n self.compression = None\n self.version = None\n self.fileLength = None\n self.frameSize = None\n self.frameRate = None\n self.frameCount = None\n self.tags = []\n self.chunkSize = 16 * 4096\n self.load()\n\n def load(self):\n \"\"\"loads the swf file at the filepath\"\"\"\n self.handle = open(self.filepath, 'rb')\n self.unpackHeader1()\n print('signature:', self.signature)\n print('version:', self.version)\n print('fileLength:', self.fileLength)\n if self.compression != 'none':\n self.decompress()\n self.unpackHeader2()\n print('frameSize:', self.frameSize)\n print('frameRate:', self.frameRate)\n print('frameCount:', self.frameCount)\n self.unpackTags()\n for tag in self.tags:\n print(tag)\n if tag.typeName == '!UNKNOWN!':\n print('warning: unknown tag!')\n\n def decompress(self):\n \"\"\"replaces the handle with a tempfile handle with all content decompressed\"\"\"\n temp = tempfile.TemporaryFile('w+b')\n if 
self.compression == 'zlib':\n decompressor = zlib.decompressobj()\n elif self.compression == 'lzma':\n decompressor = lzma.LZMADecompressor()\n else:\n raise Exception('unknown compression algorithm: ' + self.\n compression)\n chunk = self.handle.read(self.chunkSize)\n while len(chunk) > 0:\n temp.write(decompressor.decompress(chunk))\n chunk = self.handle.read(self.chunkSize)\n temp.seek(0)\n self.handle = temp\n\n def unpackHeader1(self):\n \"\"\"unpacks the first 8 bytes of the header and figures out what compression there is\"\"\"\n header = self.handle.read(8)\n signature, self.version, self.fileLength = struct.unpack('<3sBI',\n header)\n signature = signature.decode('ascii')\n if signature == 'FWS':\n self.compression = 'none'\n elif signature == 'CWS':\n self.compression = 'zlib'\n elif signature == 'ZWS':\n self.compression = 'lzma'\n else:\n raise SWFFileUnpackingException('unknown file signature: \"' +\n signature + '\"')\n self.signature = signature\n\n def unpackHeader2(self):\n \"\"\"unpacks the rest of the header data that might have been compressed\"\"\"\n self.frameSize = self.unpackRect()\n self.frameRate, self.frameCount = struct.unpack('<HH', self.handle.\n read(4))\n\n def unpackRect(self):\n data = self.handle.read(1)\n size, = bitstruct.unpack('u5', data)\n data += self.handle.read(math.ceil((size * 4 - 3) / 8))\n xmin, xmax, ymin, ymax = bitstruct.unpack('p5' + ('s' + str(size)) *\n 4, data)\n return SWFRect(xmin, xmax, ymin, ymax)\n\n def unpackTags(self):\n sample = self.handle.read(2)\n tag = None\n while len(sample) > 0:\n if tag is not None and tag.isEndTag():\n print('warning: swf has tags after an end tag!')\n self.handle.seek(-2, os.SEEK_CUR)\n tag = self.unpackTag()\n self.tags.append(tag)\n sample = self.handle.read(2)\n\n def unpackTag(self):\n tag = self.unpackTagHeader()\n self.handle.read(tag.length)\n return tag\n\n def unpackTagHeader(self):\n data, = struct.unpack('<H', self.handle.read(2))\n tagCode = data >> 6\n tagLength = data & 63\n if tagLength == 63:\n tagLength, = struct.unpack('<I', self.handle.read(4))\n return SWFTag(tagCode, tagLength)\n\n\ndef main():\n if len(sys.argv) < 2:\n print('filepath required')\n else:\n file = SWFFile(sys.argv[1])\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python3\n\n\nimport sys\nimport os\nimport math\n\nimport tempfile\n\nimport zlib\nimport lzma\nimport struct\nimport bitstruct\n\n\n\n# a swf file unpacker and analyzer\n# majority of information taken from https://www.adobe.com/devnet/swf.html (version 19)\n# some additional information taken from https://github.com/claus/as3swf/wiki/SWF-tag-support-chart\n\n\n\nclass SWFFileUnpackingException(Exception):\n\t'''generic exception during unpacking of a swf file typically due to incorrect structure or unexpected values'''\n\nclass SWFRect(object):\n\tdef __init__(self, xmin, xmax, ymin, ymax):\n\t\tself.xmin = xmin\n\t\tself.xmax = xmax\n\t\tself.ymin = ymin\n\t\tself.ymax = ymax\n\tdef __str__(self):\n\t\treturn 'SWFRect('+str(self.xmin)+','+str(self.xmax)+','+str(self.ymin)+','+str(self.ymax)+')'\n\n\ntagCodeTranslation = {\n\t0:'End',\n\t1:'ShowFrame',\n\t2:'DefineShape',\n\t4:'PlaceObject',\n\t5:'RemoveObject',\n\t6:'DefineBits',\n\t7:'DefineButton',\n\t8:'JPEGTables',\n\t9:'SetBackgroundColor',\n\t10:'DefineFont',\n\t11:'DefineText',\n\t12:'DoAction',\n\t13:'DefineFontInfo',\n\t14:'DefineSound',\n\t15:'StartSound',\n\t17:'DefineButtonSound',\n\t18:'SoundStreamHead',\n\t19:'SoundStreamBlock',\n\t20:'DefineBitsLossless',\n\t21:'DefineBitsJPEG2',\n\t22:'DefineShape2',\n\t23:'DefineButtonCxform',\n\t24:'Protect',\n\t26:'PlaceObject2',\n\t28:'RemoveObject2',\n\t32:'DefineShape3',\n\t33:'DefineText2',\n\t34:'DefineButton2',\n\t35:'DefineBitsJPEG3',\n\t36:'DefineBitsLossless2',\n\t37:'DefineEditText',\n\t39:'DefineSprite',\n\t41:'ProductInfo', # taken from https://github.com/claus/as3swf/wiki/SWF-tag-support-chart\n\t43:'FrameLabel',\n\t45:'SoundStreamHead2',\n\t46:'DefineMorphShape',\n\t48:'DefineFont2',\n\t56:'ExportAssets',\n\t57:'ImportAssets',\n\t58:'EnableDebugger',\n\t59:'DoInitAction',\n\t60:'DefineVideoStream',\n\t61:'VideoFrame',\n\t62:'DefineFontInfo2',\n\t63:'DebugID', # taken from https://github.com/claus/as3swf/wiki/SWF-tag-support-chart\n\t64:'EnableDebugger2',\n\t65:'ScriptLimits',\n\t66:'SetTabIndex',\n\t69:'FileAttributes',\n\t70:'PlaceObject3',\n\t71:'ImportAssets2',\n\t73:'DefineFontAlignZones',\n\t74:'CSMTextSettings',\n\t75:'DefineFont3',\n\t76:'SymbolClass',\n\t77:'Metadata',\n\t78:'DefineScalingGrid',\n\t82:'DoABC',\n\t83:'DefineShape4',\n\t84:'DefineMorphShape2',\n\t86:'DefineSceneAndFrameLabelData',\n\t87:'DefineBinaryData',\n\t88:'DefineFontName',\n\t89:'StartSound2',\n\t90:'DefineBitsJPEG4',\n\t91:'DefineFont4',\n\t93:'EnableTelemetry',\n}\n\n\nclass SWFTag(object):\n\tdef __init__(self, code, length):\n\t\tself.code = code\n\t\tself.length = length\n\n\t\tself.typeName = tagCodeTranslation.get(self.code, '!UNKNOWN!')\n\t\tif self.typeName == '!UNKNOWN!':\n\t\t\tprint('warning: unknown swf tag code: '+str(self.code))\n\tdef isEndTag(self):\n\t\treturn self.typeName == 'End'\n\tdef __str__(self):\n\t\treturn 'SWFTag(code='+str(self.code)+' \"'+self.typeName+'\", length='+str(self.length)+')'\n\n\nclass SWFFile(object):\n\tdef __init__(self, filepath):\n\t\tself.filepath = filepath\n\n\t\tself.compression = None\n\t\tself.version = None\n\t\tself.fileLength = None\n\t\tself.frameSize = None\n\t\tself.frameRate = None\n\t\tself.frameCount = None\n\n\t\tself.tags = []\n\n\t\tself.chunkSize = 16 * 4096\n\n\t\tself.load()\n\n\tdef load(self):\n\t\t'''loads the swf file at the filepath'''\n\t\tself.handle = open(self.filepath, 'rb')\n\n\t\tself.unpackHeader1()\n\t\tprint('signature:', self.signature)\n\t\tprint('version:', 
self.version)\n\t\tprint('fileLength:', self.fileLength)\n\n\t\tif self.compression != 'none':\n\t\t\tself.decompress()\n\n\t\tself.unpackHeader2()\n\n\t\tprint('frameSize:', self.frameSize)\n\t\tprint('frameRate:', self.frameRate)\n\t\tprint('frameCount:', self.frameCount)\n\n\t\tself.unpackTags()\n\t\tfor tag in self.tags:\n\t\t\tprint(tag)\n\t\t\tif tag.typeName == '!UNKNOWN!':\n\t\t\t\tprint('warning: unknown tag!')\n\n\n\tdef decompress(self):\n\t\t'''replaces the handle with a tempfile handle with all content decompressed'''\n\t\ttemp = tempfile.TemporaryFile('w+b')\n\t\tif self.compression == 'zlib':\n\t\t\tdecompressor = zlib.decompressobj()\n\t\telif self.compression == 'lzma':\n\t\t\tdecompressor = lzma.LZMADecompressor()\n\t\telse:\n\t\t\traise Exception(\"unknown compression algorithm: \"+self.compression)\n\t\tchunk = self.handle.read(self.chunkSize)\n\t\twhile len(chunk) > 0:\n\t\t\ttemp.write(decompressor.decompress(chunk))\n\t\t\tchunk = self.handle.read(self.chunkSize)\n\t\ttemp.seek(0)\n\t\tself.handle = temp\n\n\tdef unpackHeader1(self):\n\t\t'''unpacks the first 8 bytes of the header and figures out what compression there is'''\n\t\theader = self.handle.read(8)\n\t\tsignature, self.version, self.fileLength = struct.unpack('<3sBI', header)\n\n\t\tsignature = signature.decode('ascii')\n\t\tif signature == 'FWS':\n\t\t\tself.compression = 'none'\n\t\telif signature == 'CWS':\n\t\t\tself.compression = 'zlib'\n\t\telif signature == 'ZWS':\n\t\t\tself.compression = 'lzma'\n\t\telse:\n\t\t\traise SWFFileUnpackingException('unknown file signature: \"'+signature+'\"')\n\n\t\tself.signature = signature\n\n\tdef unpackHeader2(self):\n\t\t'''unpacks the rest of the header data that might have been compressed'''\n\t\tself.frameSize = self.unpackRect()\n\t\tself.frameRate, self.frameCount = struct.unpack('<HH', self.handle.read(4))\n\t\t# frameRate is an 8.8 float actually, but i'm not sure how to unpack that...\n\n\tdef unpackRect(self):\n\t\tdata = self.handle.read(1)\n\t\tsize, = bitstruct.unpack('u5', data)\n\t\tdata += self.handle.read(math.ceil((size * 4 - 3) / 8))\n\t\txmin, xmax, ymin, ymax = bitstruct.unpack('p5'+('s'+str(size))*4, data)\n\t\treturn SWFRect(xmin, xmax, ymin, ymax)\n\n\tdef unpackTags(self):\n\t\tsample = self.handle.read(2)\n\t\ttag = None\n\t\twhile len(sample) > 0:\n\t\t\tif tag is not None and tag.isEndTag():\n\t\t\t\tprint('warning: swf has tags after an end tag!')\n\t\t\tself.handle.seek(-2, os.SEEK_CUR)\n\t\t\ttag = self.unpackTag()\n\t\t\tself.tags.append(tag)\n\n\t\t\tsample = self.handle.read(2)\n\n\tdef unpackTag(self):\n\t\ttag = self.unpackTagHeader()\n\t\tself.handle.read(tag.length)\n\t\treturn tag\n\tdef unpackTagHeader(self):\n\t\tdata, = struct.unpack('<H', self.handle.read(2))\n\t\ttagCode = data >> 6\n\t\ttagLength = data & 0x3f\n\t\tif tagLength == 0x3f:\n\t\t\ttagLength, = struct.unpack('<I', self.handle.read(4))\n\t\treturn SWFTag(tagCode, tagLength)\n\n\n\n\ndef main():\n\tif len(sys.argv) < 2:\n\t\tprint('filepath required')\n\telse:\n\t\tfile = SWFFile(sys.argv[1])\n\n\nif __name__ == '__main__':\n\tmain()\n",
"step-ids": [
17,
18,
20,
22,
24
]
}
|
[
17,
18,
20,
22,
24
] |
from collections import Counter
class Solution:
    def countStudents(self, students, sandwiches) -> int:
        # Simulate the lunch queue: serve the front student when they want the
        # top sandwich; otherwise rotate the queue so the next student who does
        # want it comes to the front (relative order does not change the final
        # count). Stop once nobody left in line wants the top sandwich.
        if not students or not sandwiches:
            return 0
        while students:
            top_san = sandwiches[0]
            if top_san == students[0]:
                students = students[1:]
                sandwiches = sandwiches[1:]
            else:
                if top_san not in students:
                    break
                idx = students.index(top_san)
                students = students[idx:] + students[:idx]
        return len(students)

    def countStudents_2(self, students, sandwiches) -> int:
        # Counting shortcut: only how many students prefer each sandwich type
        # matters. Hand out sandwiches from the top until one is wanted by
        # nobody; everyone still in line at that point goes unserved.
        prefers = Counter(students)
        n, k = len(students), 0
        while k < n and prefers[sandwiches[k]]:
            prefers[sandwiches[k]] -= 1
            k += 1
        return n - k
s = Solution()
s.countStudents([1, 1, 0, 0], [0, 1, 0, 1])
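# Quick illustrative check (an addition, not part of the original snippet):
# both implementations should report the same number of unserved students.
assert s.countStudents([1, 1, 0, 0], [0, 1, 0, 1]) == s.countStudents_2([1, 1, 0, 0], [0, 1, 0, 1]) == 0
print(s.countStudents_2([1, 1, 1, 0, 0, 1], [1, 0, 0, 0, 1, 1]))  # expected: 3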
|
normal
|
{
"blob_id": "235fce2615e2a5879f455aac9bcecbc2d152679b",
"index": 4548,
"step-1": "<mask token>\n\n\nclass Solution:\n\n def countStudents(self, students, sandwiches) ->int:\n if not students or not sandwiches:\n return 0\n while students:\n top_san = sandwiches[0]\n if top_san == students[0]:\n students = students[1:]\n sandwiches = sandwiches[1:]\n else:\n if top_san not in students:\n break\n idx = students.index(top_san)\n students = students[idx:] + students[:idx]\n return len(students)\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n\n def countStudents(self, students, sandwiches) ->int:\n if not students or not sandwiches:\n return 0\n while students:\n top_san = sandwiches[0]\n if top_san == students[0]:\n students = students[1:]\n sandwiches = sandwiches[1:]\n else:\n if top_san not in students:\n break\n idx = students.index(top_san)\n students = students[idx:] + students[:idx]\n return len(students)\n\n def countStudents_2(self, students, sandwiches) ->int:\n prefers = Counter(students)\n n, k = len(students), 0\n while k < n and prefers[sandwiches[k]]:\n prefers[sandwiches[k]] -= 1\n k += 1\n return n - k\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def countStudents(self, students, sandwiches) ->int:\n if not students or not sandwiches:\n return 0\n while students:\n top_san = sandwiches[0]\n if top_san == students[0]:\n students = students[1:]\n sandwiches = sandwiches[1:]\n else:\n if top_san not in students:\n break\n idx = students.index(top_san)\n students = students[idx:] + students[:idx]\n return len(students)\n\n def countStudents_2(self, students, sandwiches) ->int:\n prefers = Counter(students)\n n, k = len(students), 0\n while k < n and prefers[sandwiches[k]]:\n prefers[sandwiches[k]] -= 1\n k += 1\n return n - k\n\n\ns = Solution()\ns.countStudents([1, 1, 0, 0], [0, 1, 0, 1])\n",
"step-4": "from collections import Counter\n\n\nclass Solution:\n\n def countStudents(self, students, sandwiches) ->int:\n if not students or not sandwiches:\n return 0\n while students:\n top_san = sandwiches[0]\n if top_san == students[0]:\n students = students[1:]\n sandwiches = sandwiches[1:]\n else:\n if top_san not in students:\n break\n idx = students.index(top_san)\n students = students[idx:] + students[:idx]\n return len(students)\n\n def countStudents_2(self, students, sandwiches) ->int:\n prefers = Counter(students)\n n, k = len(students), 0\n while k < n and prefers[sandwiches[k]]:\n prefers[sandwiches[k]] -= 1\n k += 1\n return n - k\n\n\ns = Solution()\ns.countStudents([1, 1, 0, 0], [0, 1, 0, 1])\n",
"step-5": null,
"step-ids": [
2,
3,
5,
6
]
}
|
[
2,
3,
5,
6
] |
# -*- coding: utf-8 -*-
a=float(input('Digite um número:'))
b=(a-(a%1))
c=(a%1)
print('O valor inteiro é %d' %b)
print('O valor decimal é %.6f' %c)
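# Illustrative alternative (an addition, not part of the original exercise):
# math.modf splits a float into fractional and integer parts in one call.
import math
frac, whole = math.modf(3.75)
print('O valor inteiro é %d' % whole)   # 3
print('O valor decimal é %.6f' % frac)  # 0.750000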
|
normal
|
{
"blob_id": "1b09b18926dc95d4c4b3088f45088f12c162ccb3",
"index": 5465,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('O valor inteiro é %d' % b)\nprint('O valor decimal é %.6f' % c)\n",
"step-3": "a = float(input('Digite um número:'))\nb = a - a % 1\nc = a % 1\nprint('O valor inteiro é %d' % b)\nprint('O valor decimal é %.6f' % c)\n",
"step-4": "# -*- coding: utf-8 -*-\na=float(input('Digite um número:'))\nb=(a-(a%1))\nc=(a%1)\nprint('O valor inteiro é %d' %b)\nprint('O valor decimal é %.6f' %c)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def validate_url(ch, method, properties, body):
message = json.loads(body)
valid = True
print(f"Got new URL to check: {message['url']}.")
try:
urllib.request.urlopen('https://github.com/' + message['url'])
except urllib.error.HTTPError as e:
if e.code != 200:
valid = False
print(f'Checking done. Link accessible: {valid}.')
request = urllib.request.Request('http://localhost:5002/post/' + str(
message['id']) + '/update', json.dumps({'link_accessible': valid}).
encode('utf8'), method='POST', headers={'content-type':
'application/json'})
urllib.request.urlopen(request)
print(f'Post status updated.')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def validate_urls():
connection = pika.BlockingConnection(pika.ConnectionParameters('localhost')
)
channel = connection.channel()
channel.queue_declare(queue='urlValidationQueue')
channel.basic_consume(validate_url, queue='urlValidationQueue', no_ack=True
)
channel.start_consuming()
def validate_url(ch, method, properties, body):
message = json.loads(body)
valid = True
print(f"Got new URL to check: {message['url']}.")
try:
urllib.request.urlopen('https://github.com/' + message['url'])
except urllib.error.HTTPError as e:
if e.code != 200:
valid = False
print(f'Checking done. Link accessible: {valid}.')
request = urllib.request.Request('http://localhost:5002/post/' + str(
message['id']) + '/update', json.dumps({'link_accessible': valid}).
encode('utf8'), method='POST', headers={'content-type':
'application/json'})
urllib.request.urlopen(request)
print(f'Post status updated.')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def validate_urls():
connection = pika.BlockingConnection(pika.ConnectionParameters('localhost')
)
channel = connection.channel()
channel.queue_declare(queue='urlValidationQueue')
channel.basic_consume(validate_url, queue='urlValidationQueue', no_ack=True
)
channel.start_consuming()
def validate_url(ch, method, properties, body):
message = json.loads(body)
valid = True
print(f"Got new URL to check: {message['url']}.")
try:
urllib.request.urlopen('https://github.com/' + message['url'])
except urllib.error.HTTPError as e:
if e.code != 200:
valid = False
print(f'Checking done. Link accessible: {valid}.')
request = urllib.request.Request('http://localhost:5002/post/' + str(
message['id']) + '/update', json.dumps({'link_accessible': valid}).
encode('utf8'), method='POST', headers={'content-type':
'application/json'})
urllib.request.urlopen(request)
print(f'Post status updated.')
if __name__ == '__main__':
print('Validator worker started. Waiting for tasks to do...')
validate_urls()
<|reserved_special_token_1|>
import json
import pika
import urllib.request
def validate_urls():
connection = pika.BlockingConnection(pika.ConnectionParameters('localhost')
)
channel = connection.channel()
channel.queue_declare(queue='urlValidationQueue')
channel.basic_consume(validate_url, queue='urlValidationQueue', no_ack=True
)
channel.start_consuming()
def validate_url(ch, method, properties, body):
message = json.loads(body)
valid = True
print(f"Got new URL to check: {message['url']}.")
try:
urllib.request.urlopen('https://github.com/' + message['url'])
except urllib.error.HTTPError as e:
if e.code != 200:
valid = False
print(f'Checking done. Link accessible: {valid}.')
request = urllib.request.Request('http://localhost:5002/post/' + str(
message['id']) + '/update', json.dumps({'link_accessible': valid}).
encode('utf8'), method='POST', headers={'content-type':
'application/json'})
urllib.request.urlopen(request)
print(f'Post status updated.')
if __name__ == '__main__':
print('Validator worker started. Waiting for tasks to do...')
validate_urls()
<|reserved_special_token_1|>
import json
import pika
import urllib.request
def validate_urls():
connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()
channel.queue_declare(queue='urlValidationQueue')
channel.basic_consume(validate_url,
queue='urlValidationQueue',
no_ack=True)
channel.start_consuming()
def validate_url(ch, method, properties, body):
message = json.loads(body)
valid = True
print(f'Got new URL to check: {message["url"]}.')
try:
urllib.request.urlopen('https://github.com/' + message["url"])
except urllib.error.HTTPError as e:
if e.code != 200:
valid = False
print(f'Checking done. Link accessible: {valid}.')
request = urllib.request.Request('http://localhost:5002/post/' + str(message["id"]) + '/update',
json.dumps({'link_accessible': valid}).encode('utf8'), method='POST',
headers={'content-type': 'application/json'})
urllib.request.urlopen(request)
print(f'Post status updated.')
if __name__ == '__main__':
print("Validator worker started. Waiting for tasks to do...")
validate_urls()
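# Hypothetical producer sketch (an addition; the queue name and message fields
# mirror the consumer above, the repository path is just an example value):
import json
import pika

def enqueue_url_check(post_id, url):
    # Publish a message shaped like the one validate_url() expects.
    connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
    channel = connection.channel()
    channel.queue_declare(queue='urlValidationQueue')
    channel.basic_publish(exchange='',
                          routing_key='urlValidationQueue',
                          body=json.dumps({'id': post_id, 'url': url}))
    connection.close()

# enqueue_url_check(1, 'pika/pika')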
|
flexible
|
{
"blob_id": "4a09096abf073294afcf21b1eff9350329d4db33",
"index": 5252,
"step-1": "<mask token>\n\n\ndef validate_url(ch, method, properties, body):\n message = json.loads(body)\n valid = True\n print(f\"Got new URL to check: {message['url']}.\")\n try:\n urllib.request.urlopen('https://github.com/' + message['url'])\n except urllib.error.HTTPError as e:\n if e.code != 200:\n valid = False\n print(f'Checking done. Link accessible: {valid}.')\n request = urllib.request.Request('http://localhost:5002/post/' + str(\n message['id']) + '/update', json.dumps({'link_accessible': valid}).\n encode('utf8'), method='POST', headers={'content-type':\n 'application/json'})\n urllib.request.urlopen(request)\n print(f'Post status updated.')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef validate_urls():\n connection = pika.BlockingConnection(pika.ConnectionParameters('localhost')\n )\n channel = connection.channel()\n channel.queue_declare(queue='urlValidationQueue')\n channel.basic_consume(validate_url, queue='urlValidationQueue', no_ack=True\n )\n channel.start_consuming()\n\n\ndef validate_url(ch, method, properties, body):\n message = json.loads(body)\n valid = True\n print(f\"Got new URL to check: {message['url']}.\")\n try:\n urllib.request.urlopen('https://github.com/' + message['url'])\n except urllib.error.HTTPError as e:\n if e.code != 200:\n valid = False\n print(f'Checking done. Link accessible: {valid}.')\n request = urllib.request.Request('http://localhost:5002/post/' + str(\n message['id']) + '/update', json.dumps({'link_accessible': valid}).\n encode('utf8'), method='POST', headers={'content-type':\n 'application/json'})\n urllib.request.urlopen(request)\n print(f'Post status updated.')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef validate_urls():\n connection = pika.BlockingConnection(pika.ConnectionParameters('localhost')\n )\n channel = connection.channel()\n channel.queue_declare(queue='urlValidationQueue')\n channel.basic_consume(validate_url, queue='urlValidationQueue', no_ack=True\n )\n channel.start_consuming()\n\n\ndef validate_url(ch, method, properties, body):\n message = json.loads(body)\n valid = True\n print(f\"Got new URL to check: {message['url']}.\")\n try:\n urllib.request.urlopen('https://github.com/' + message['url'])\n except urllib.error.HTTPError as e:\n if e.code != 200:\n valid = False\n print(f'Checking done. Link accessible: {valid}.')\n request = urllib.request.Request('http://localhost:5002/post/' + str(\n message['id']) + '/update', json.dumps({'link_accessible': valid}).\n encode('utf8'), method='POST', headers={'content-type':\n 'application/json'})\n urllib.request.urlopen(request)\n print(f'Post status updated.')\n\n\nif __name__ == '__main__':\n print('Validator worker started. Waiting for tasks to do...')\n validate_urls()\n",
"step-4": "import json\nimport pika\nimport urllib.request\n\n\ndef validate_urls():\n connection = pika.BlockingConnection(pika.ConnectionParameters('localhost')\n )\n channel = connection.channel()\n channel.queue_declare(queue='urlValidationQueue')\n channel.basic_consume(validate_url, queue='urlValidationQueue', no_ack=True\n )\n channel.start_consuming()\n\n\ndef validate_url(ch, method, properties, body):\n message = json.loads(body)\n valid = True\n print(f\"Got new URL to check: {message['url']}.\")\n try:\n urllib.request.urlopen('https://github.com/' + message['url'])\n except urllib.error.HTTPError as e:\n if e.code != 200:\n valid = False\n print(f'Checking done. Link accessible: {valid}.')\n request = urllib.request.Request('http://localhost:5002/post/' + str(\n message['id']) + '/update', json.dumps({'link_accessible': valid}).\n encode('utf8'), method='POST', headers={'content-type':\n 'application/json'})\n urllib.request.urlopen(request)\n print(f'Post status updated.')\n\n\nif __name__ == '__main__':\n print('Validator worker started. Waiting for tasks to do...')\n validate_urls()\n",
"step-5": "import json\nimport pika\nimport urllib.request\n\n\ndef validate_urls():\n connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))\n channel = connection.channel()\n channel.queue_declare(queue='urlValidationQueue')\n channel.basic_consume(validate_url,\n queue='urlValidationQueue',\n no_ack=True)\n channel.start_consuming()\n\n\ndef validate_url(ch, method, properties, body):\n message = json.loads(body)\n valid = True\n print(f'Got new URL to check: {message[\"url\"]}.')\n\n try:\n urllib.request.urlopen('https://github.com/' + message[\"url\"])\n except urllib.error.HTTPError as e:\n if e.code != 200:\n valid = False\n\n print(f'Checking done. Link accessible: {valid}.')\n request = urllib.request.Request('http://localhost:5002/post/' + str(message[\"id\"]) + '/update',\n json.dumps({'link_accessible': valid}).encode('utf8'), method='POST',\n headers={'content-type': 'application/json'})\n\n urllib.request.urlopen(request)\n print(f'Post status updated.')\n\n\nif __name__ == '__main__':\n print(\"Validator worker started. Waiting for tasks to do...\")\n validate_urls()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import os
import location
import teamList
import pandas as pd
import csv
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
##adapted from code from this website:
## https://towardsdatascience.com/simple-little-tables-with-matplotlib-9780ef5d0bc4
year = "18-19"
team = "ARI"
seasonReportRaw = pd.read_csv("Data/" + year + " " + team + "/" + team + "_SeasonRaw.csv")
seasonReportRaw['tEPPfP'] = seasonReportRaw['tEPDHP'] + seasonReportRaw['tEPDEP'] + seasonReportRaw['tEPDOP']
homeWins = seasonReportRaw[(seasonReportRaw["Home Team"] == team) & (seasonReportRaw["Home Score"] > seasonReportRaw["Away Score"])]
awayWins = seasonReportRaw[(seasonReportRaw["Away Team"] == team) & (seasonReportRaw["Away Score"] > seasonReportRaw["Home Score"])]
homeLosses = seasonReportRaw[(seasonReportRaw["Home Team"] == team) & (seasonReportRaw["Home Score"] < seasonReportRaw["Away Score"])]
awayLosses = seasonReportRaw[(seasonReportRaw["Away Team"] == team) & (seasonReportRaw["Away Score"] < seasonReportRaw["Home Score"])]
winCount = homeWins["Home Team"].count() + awayWins["Away Team"].count()
PenaltiesSeasonTotal = seasonReportRaw["tPEN(#)"].sum()
PenaltiesSeasonAverage = PenaltiesSeasonTotal / 16
PenaltiesWinTotal = homeWins["tPEN(#)"].sum() + awayWins["tPEN(#)"].sum()
PenaltiesWinAverage = PenaltiesWinTotal / winCount
PenaltiesLossTotal = homeLosses["tPEN(#)"].sum() + awayLosses["tPEN(#)"].sum()
PenaltiesLossAverage = PenaltiesLossTotal / (16-winCount)
EPCSeasonTotal = seasonReportRaw["tEPPfP"].sum()
EPCSeasonAverage = EPCSeasonTotal / 16
EPCWinTotal = homeWins["tEPPfP"].sum() + awayWins["tEPPfP"].sum()
EPCWinAverage = EPCWinTotal / winCount
EPCLossTotal = homeLosses["tEPPfP"].sum() + awayLosses["tEPPfP"].sum()
EPCLossAverage = EPCLossTotal / (16-winCount)
EPCDHPSeasonTotal = seasonReportRaw["tEPDHP"].sum()
EPCDHPSeasonAverage = EPCDHPSeasonTotal / 16
EPCDHPWinTotal = homeWins["tEPDHP"].sum() + awayWins["tEPDHP"].sum()
EPCDHPWinAverage = EPCDHPWinTotal / winCount
EPCDHPLossTotal = homeLosses["tEPDHP"].sum() + awayLosses["tEPDHP"].sum()
EPCDHPLossAverage = EPCDHPLossTotal / (16-winCount)
EPCDEPSeasonTotal = seasonReportRaw["tEPDEP"].sum()
EPCDEPSeasonAverage = EPCDEPSeasonTotal / 16
EPCDEPWinTotal = homeWins["tEPDEP"].sum() + awayWins["tEPDEP"].sum()
EPCDEPWinAverage = EPCDEPWinTotal / winCount
EPCDEPLossTotal = homeLosses["tEPDEP"].sum() + awayLosses["tEPDEP"].sum()
EPCDEPLossAverage = EPCDEPLossTotal / (16-winCount)
EPCOPSeasonTotal = seasonReportRaw["tEPDOP"].sum()
EPCOPSeasonAverage = EPCOPSeasonTotal / 16
EPCOPWinTotal = homeWins["tEPDOP"].sum() + awayWins["tEPDOP"].sum()
EPCOPWinAverage = EPCOPWinTotal / winCount
EPCOPLossTotal = homeLosses["tEPDOP"].sum() + awayLosses["tEPDOP"].sum()
EPCOPLossAverage = EPCOPLossTotal / (16-winCount)
headerRow = ['Season Total', 'Per Game', 'Win Total', 'Per Win', 'Loss Total','Per Loss']
penaltiesRow = ['Penalties',PenaltiesSeasonTotal,PenaltiesSeasonAverage,PenaltiesWinTotal,PenaltiesWinAverage,PenaltiesLossTotal,PenaltiesLossAverage]
EPCRow = ['EPC',EPCSeasonTotal,EPCSeasonAverage,EPCWinTotal,EPCWinAverage,EPCLossTotal,EPCLossAverage]
EPCDHPRow = ['EPCDHP',EPCDHPSeasonTotal,EPCDHPSeasonAverage,EPCDHPWinTotal,EPCDHPWinAverage,EPCDHPLossTotal,EPCDHPLossAverage]
EPCDEPRow = ['EPCDEP',EPCDEPSeasonTotal,EPCDEPSeasonAverage,EPCDEPWinTotal,EPCDEPWinAverage,EPCDEPLossTotal,EPCDEPLossAverage]
EPCOPRow = ['EPCOP',EPCOPSeasonTotal,EPCOPSeasonAverage,EPCOPWinTotal,EPCOPWinAverage,EPCOPLossTotal,EPCOPLossAverage]
fig_background_color = 'white'
fig_border = 'black'
data = [headerRow,penaltiesRow,EPCRow,EPCDHPRow,EPCDEPRow,EPCOPRow]
# Pop the headers from the data array
column_headers = data.pop(0)
row_headers = [x.pop(0) for x in data]
# Table data needs to be non-numeric text. Format the data
# while I'm at it.
cell_text = []
for row in data:
cell_text.append([f'{x:1.2f}' for x in row])
# Get some lists of color specs for row and column headers
rcolors = plt.cm.BuPu(np.full(len(row_headers), 0.1))
ccolors = plt.cm.BuPu(np.full(len(column_headers), 0.1))
# Create the figure. Setting a small pad on tight_layout
# seems to better regulate white space. Sometimes experimenting
# with an explicit figsize here can produce better outcome.
plt.figure(linewidth=2,
edgecolor=fig_border,
facecolor=fig_background_color,
tight_layout={'pad':1},
figsize=(4.5,1.75)
)
# Add a table at the bottom of the axes
the_table = plt.table(cellText=cell_text,
rowLabels=row_headers,
rowColours=rcolors,
rowLoc='right',
colColours=ccolors,
colLabels=column_headers,
loc='center')
# Scaling is the only influence we have over top and bottom cell padding.
# Make the rows taller (i.e., make cell y scale larger).
the_table.scale(1, 1.1)
# Hide axes
ax = plt.gca()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# Hide axes border
plt.box(on=None)
# Force the figure to update, so backends center objects correctly within the figure.
# Without plt.draw() here, the title will center on the axes and not the figure.
plt.draw()
# Create image. plt.savefig ignores figure edge and face colors, so map them.
fig = plt.gcf()
plt.savefig('pyplot-table-demo.png',
edgecolor=fig.get_edgecolor(),
facecolor=fig.get_facecolor(),
dpi=175
)
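# Optional sketch (an addition): the same win/loss splits can be derived once by
# tagging each game and aggregating with groupby. Column names are the ones used
# above; tied games would fall into the False group here.
seasonReportRaw['won'] = (
    ((seasonReportRaw['Home Team'] == team) & (seasonReportRaw['Home Score'] > seasonReportRaw['Away Score'])) |
    ((seasonReportRaw['Away Team'] == team) & (seasonReportRaw['Away Score'] > seasonReportRaw['Home Score']))
)
print(seasonReportRaw.groupby('won')[['tPEN(#)', 'tEPPfP']].agg(['sum', 'mean']))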
|
normal
|
{
"blob_id": "ba7db49ca7956fdc055702ffccba769485fd0046",
"index": 8915,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor row in data:\n cell_text.append([f'{x:1.2f}' for x in row])\n<mask token>\nplt.figure(linewidth=2, edgecolor=fig_border, facecolor=\n fig_background_color, tight_layout={'pad': 1}, figsize=(4.5, 1.75))\n<mask token>\nthe_table.scale(1, 1.1)\n<mask token>\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)\nplt.box(on=None)\nplt.draw()\n<mask token>\nplt.savefig('pyplot-table-demo.png', edgecolor=fig.get_edgecolor(),\n facecolor=fig.get_facecolor(), dpi=175)\n",
"step-3": "<mask token>\nyear = '18-19'\nteam = 'ARI'\nseasonReportRaw = pd.read_csv('Data/' + year + ' ' + team + '/' + team +\n '_SeasonRaw.csv')\nseasonReportRaw['tEPPfP'] = seasonReportRaw['tEPDHP'] + seasonReportRaw[\n 'tEPDEP'] + seasonReportRaw['tEPDOP']\nhomeWins = seasonReportRaw[(seasonReportRaw['Home Team'] == team) & (\n seasonReportRaw['Home Score'] > seasonReportRaw['Away Score'])]\nawayWins = seasonReportRaw[(seasonReportRaw['Away Team'] == team) & (\n seasonReportRaw['Away Score'] > seasonReportRaw['Home Score'])]\nhomeLosses = seasonReportRaw[(seasonReportRaw['Home Team'] == team) & (\n seasonReportRaw['Home Score'] < seasonReportRaw['Away Score'])]\nawayLosses = seasonReportRaw[(seasonReportRaw['Away Team'] == team) & (\n seasonReportRaw['Away Score'] < seasonReportRaw['Home Score'])]\nwinCount = homeWins['Home Team'].count() + awayWins['Away Team'].count()\nPenaltiesSeasonTotal = seasonReportRaw['tPEN(#)'].sum()\nPenaltiesSeasonAverage = PenaltiesSeasonTotal / 16\nPenaltiesWinTotal = homeWins['tPEN(#)'].sum() + awayWins['tPEN(#)'].sum()\nPenaltiesWinAverage = PenaltiesWinTotal / winCount\nPenaltiesLossTotal = homeLosses['tPEN(#)'].sum() + awayLosses['tPEN(#)'].sum()\nPenaltiesLossAverage = PenaltiesLossTotal / (16 - winCount)\nEPCSeasonTotal = seasonReportRaw['tEPPfP'].sum()\nEPCSeasonAverage = EPCSeasonTotal / 16\nEPCWinTotal = homeWins['tEPPfP'].sum() + awayWins['tEPPfP'].sum()\nEPCWinAverage = EPCWinTotal / winCount\nEPCLossTotal = homeLosses['tEPPfP'].sum() + awayLosses['tEPPfP'].sum()\nEPCLossAverage = EPCLossTotal / (16 - winCount)\nEPCDHPSeasonTotal = seasonReportRaw['tEPDHP'].sum()\nEPCDHPSeasonAverage = EPCDHPSeasonTotal / 16\nEPCDHPWinTotal = homeWins['tEPDHP'].sum() + awayWins['tEPDHP'].sum()\nEPCDHPWinAverage = EPCDHPWinTotal / winCount\nEPCDHPLossTotal = homeLosses['tEPDHP'].sum() + awayLosses['tEPDHP'].sum()\nEPCDHPLossAverage = EPCDHPLossTotal / (16 - winCount)\nEPCDEPSeasonTotal = seasonReportRaw['tEPDEP'].sum()\nEPCDEPSeasonAverage = EPCDEPSeasonTotal / 16\nEPCDEPWinTotal = homeWins['tEPDEP'].sum() + awayWins['tEPDEP'].sum()\nEPCDEPWinAverage = EPCDEPWinTotal / winCount\nEPCDEPLossTotal = homeLosses['tEPDEP'].sum() + awayLosses['tEPDEP'].sum()\nEPCDEPLossAverage = EPCDEPLossTotal / (16 - winCount)\nEPCOPSeasonTotal = seasonReportRaw['tEPDOP'].sum()\nEPCOPSeasonAverage = EPCOPSeasonTotal / 16\nEPCOPWinTotal = homeWins['tEPDOP'].sum() + awayWins['tEPDOP'].sum()\nEPCOPWinAverage = EPCOPWinTotal / winCount\nEPCOPLossTotal = homeLosses['tEPDOP'].sum() + awayLosses['tEPDOP'].sum()\nEPCOPLossAverage = EPCOPLossTotal / (16 - winCount)\nheaderRow = ['Season Total', 'Per Game', 'Win Total', 'Per Win',\n 'Loss Total', 'Per Loss']\npenaltiesRow = ['Penalties', PenaltiesSeasonTotal, PenaltiesSeasonAverage,\n PenaltiesWinTotal, PenaltiesWinAverage, PenaltiesLossTotal,\n PenaltiesLossAverage]\nEPCRow = ['EPC', EPCSeasonTotal, EPCSeasonAverage, EPCWinTotal,\n EPCWinAverage, EPCLossTotal, EPCLossAverage]\nEPCDHPRow = ['EPCDHP', EPCDHPSeasonTotal, EPCDHPSeasonAverage,\n EPCDHPWinTotal, EPCDHPWinAverage, EPCDHPLossTotal, EPCDHPLossAverage]\nEPCDEPRow = ['EPCDEP', EPCDEPSeasonTotal, EPCDEPSeasonAverage,\n EPCDEPWinTotal, EPCDEPWinAverage, EPCDEPLossTotal, EPCDEPLossAverage]\nEPCOPRow = ['EPCOP', EPCOPSeasonTotal, EPCOPSeasonAverage, EPCOPWinTotal,\n EPCOPWinAverage, EPCOPLossTotal, EPCOPLossAverage]\nfig_background_color = 'white'\nfig_border = 'black'\ndata = [headerRow, penaltiesRow, EPCRow, EPCDHPRow, EPCDEPRow, EPCOPRow]\ncolumn_headers = data.pop(0)\nrow_headers = 
[x.pop(0) for x in data]\ncell_text = []\nfor row in data:\n cell_text.append([f'{x:1.2f}' for x in row])\nrcolors = plt.cm.BuPu(np.full(len(row_headers), 0.1))\nccolors = plt.cm.BuPu(np.full(len(column_headers), 0.1))\nplt.figure(linewidth=2, edgecolor=fig_border, facecolor=\n fig_background_color, tight_layout={'pad': 1}, figsize=(4.5, 1.75))\nthe_table = plt.table(cellText=cell_text, rowLabels=row_headers, rowColours\n =rcolors, rowLoc='right', colColours=ccolors, colLabels=column_headers,\n loc='center')\nthe_table.scale(1, 1.1)\nax = plt.gca()\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)\nplt.box(on=None)\nplt.draw()\nfig = plt.gcf()\nplt.savefig('pyplot-table-demo.png', edgecolor=fig.get_edgecolor(),\n facecolor=fig.get_facecolor(), dpi=175)\n",
"step-4": "import os\nimport location\nimport teamList\nimport pandas as pd\nimport csv\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy import stats\nyear = '18-19'\nteam = 'ARI'\nseasonReportRaw = pd.read_csv('Data/' + year + ' ' + team + '/' + team +\n '_SeasonRaw.csv')\nseasonReportRaw['tEPPfP'] = seasonReportRaw['tEPDHP'] + seasonReportRaw[\n 'tEPDEP'] + seasonReportRaw['tEPDOP']\nhomeWins = seasonReportRaw[(seasonReportRaw['Home Team'] == team) & (\n seasonReportRaw['Home Score'] > seasonReportRaw['Away Score'])]\nawayWins = seasonReportRaw[(seasonReportRaw['Away Team'] == team) & (\n seasonReportRaw['Away Score'] > seasonReportRaw['Home Score'])]\nhomeLosses = seasonReportRaw[(seasonReportRaw['Home Team'] == team) & (\n seasonReportRaw['Home Score'] < seasonReportRaw['Away Score'])]\nawayLosses = seasonReportRaw[(seasonReportRaw['Away Team'] == team) & (\n seasonReportRaw['Away Score'] < seasonReportRaw['Home Score'])]\nwinCount = homeWins['Home Team'].count() + awayWins['Away Team'].count()\nPenaltiesSeasonTotal = seasonReportRaw['tPEN(#)'].sum()\nPenaltiesSeasonAverage = PenaltiesSeasonTotal / 16\nPenaltiesWinTotal = homeWins['tPEN(#)'].sum() + awayWins['tPEN(#)'].sum()\nPenaltiesWinAverage = PenaltiesWinTotal / winCount\nPenaltiesLossTotal = homeLosses['tPEN(#)'].sum() + awayLosses['tPEN(#)'].sum()\nPenaltiesLossAverage = PenaltiesLossTotal / (16 - winCount)\nEPCSeasonTotal = seasonReportRaw['tEPPfP'].sum()\nEPCSeasonAverage = EPCSeasonTotal / 16\nEPCWinTotal = homeWins['tEPPfP'].sum() + awayWins['tEPPfP'].sum()\nEPCWinAverage = EPCWinTotal / winCount\nEPCLossTotal = homeLosses['tEPPfP'].sum() + awayLosses['tEPPfP'].sum()\nEPCLossAverage = EPCLossTotal / (16 - winCount)\nEPCDHPSeasonTotal = seasonReportRaw['tEPDHP'].sum()\nEPCDHPSeasonAverage = EPCDHPSeasonTotal / 16\nEPCDHPWinTotal = homeWins['tEPDHP'].sum() + awayWins['tEPDHP'].sum()\nEPCDHPWinAverage = EPCDHPWinTotal / winCount\nEPCDHPLossTotal = homeLosses['tEPDHP'].sum() + awayLosses['tEPDHP'].sum()\nEPCDHPLossAverage = EPCDHPLossTotal / (16 - winCount)\nEPCDEPSeasonTotal = seasonReportRaw['tEPDEP'].sum()\nEPCDEPSeasonAverage = EPCDEPSeasonTotal / 16\nEPCDEPWinTotal = homeWins['tEPDEP'].sum() + awayWins['tEPDEP'].sum()\nEPCDEPWinAverage = EPCDEPWinTotal / winCount\nEPCDEPLossTotal = homeLosses['tEPDEP'].sum() + awayLosses['tEPDEP'].sum()\nEPCDEPLossAverage = EPCDEPLossTotal / (16 - winCount)\nEPCOPSeasonTotal = seasonReportRaw['tEPDOP'].sum()\nEPCOPSeasonAverage = EPCOPSeasonTotal / 16\nEPCOPWinTotal = homeWins['tEPDOP'].sum() + awayWins['tEPDOP'].sum()\nEPCOPWinAverage = EPCOPWinTotal / winCount\nEPCOPLossTotal = homeLosses['tEPDOP'].sum() + awayLosses['tEPDOP'].sum()\nEPCOPLossAverage = EPCOPLossTotal / (16 - winCount)\nheaderRow = ['Season Total', 'Per Game', 'Win Total', 'Per Win',\n 'Loss Total', 'Per Loss']\npenaltiesRow = ['Penalties', PenaltiesSeasonTotal, PenaltiesSeasonAverage,\n PenaltiesWinTotal, PenaltiesWinAverage, PenaltiesLossTotal,\n PenaltiesLossAverage]\nEPCRow = ['EPC', EPCSeasonTotal, EPCSeasonAverage, EPCWinTotal,\n EPCWinAverage, EPCLossTotal, EPCLossAverage]\nEPCDHPRow = ['EPCDHP', EPCDHPSeasonTotal, EPCDHPSeasonAverage,\n EPCDHPWinTotal, EPCDHPWinAverage, EPCDHPLossTotal, EPCDHPLossAverage]\nEPCDEPRow = ['EPCDEP', EPCDEPSeasonTotal, EPCDEPSeasonAverage,\n EPCDEPWinTotal, EPCDEPWinAverage, EPCDEPLossTotal, EPCDEPLossAverage]\nEPCOPRow = ['EPCOP', EPCOPSeasonTotal, EPCOPSeasonAverage, EPCOPWinTotal,\n EPCOPWinAverage, EPCOPLossTotal, EPCOPLossAverage]\nfig_background_color = 
'white'\nfig_border = 'black'\ndata = [headerRow, penaltiesRow, EPCRow, EPCDHPRow, EPCDEPRow, EPCOPRow]\ncolumn_headers = data.pop(0)\nrow_headers = [x.pop(0) for x in data]\ncell_text = []\nfor row in data:\n cell_text.append([f'{x:1.2f}' for x in row])\nrcolors = plt.cm.BuPu(np.full(len(row_headers), 0.1))\nccolors = plt.cm.BuPu(np.full(len(column_headers), 0.1))\nplt.figure(linewidth=2, edgecolor=fig_border, facecolor=\n fig_background_color, tight_layout={'pad': 1}, figsize=(4.5, 1.75))\nthe_table = plt.table(cellText=cell_text, rowLabels=row_headers, rowColours\n =rcolors, rowLoc='right', colColours=ccolors, colLabels=column_headers,\n loc='center')\nthe_table.scale(1, 1.1)\nax = plt.gca()\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)\nplt.box(on=None)\nplt.draw()\nfig = plt.gcf()\nplt.savefig('pyplot-table-demo.png', edgecolor=fig.get_edgecolor(),\n facecolor=fig.get_facecolor(), dpi=175)\n",
"step-5": "import os\nimport location\nimport teamList\nimport pandas as pd\nimport csv\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy import stats\n\n##adapted from code from this website:\n## https://towardsdatascience.com/simple-little-tables-with-matplotlib-9780ef5d0bc4\n\nyear = \"18-19\"\n\nteam = \"ARI\"\n\nseasonReportRaw = pd.read_csv(\"Data/\" + year + \" \" + team + \"/\" + team + \"_SeasonRaw.csv\")\nseasonReportRaw['tEPPfP'] = seasonReportRaw['tEPDHP'] + seasonReportRaw['tEPDEP'] + seasonReportRaw['tEPDOP']\n\nhomeWins = seasonReportRaw[(seasonReportRaw[\"Home Team\"] == team) & (seasonReportRaw[\"Home Score\"] > seasonReportRaw[\"Away Score\"])]\nawayWins = seasonReportRaw[(seasonReportRaw[\"Away Team\"] == team) & (seasonReportRaw[\"Away Score\"] > seasonReportRaw[\"Home Score\"])]\n\nhomeLosses = seasonReportRaw[(seasonReportRaw[\"Home Team\"] == team) & (seasonReportRaw[\"Home Score\"] < seasonReportRaw[\"Away Score\"])]\nawayLosses = seasonReportRaw[(seasonReportRaw[\"Away Team\"] == team) & (seasonReportRaw[\"Away Score\"] < seasonReportRaw[\"Home Score\"])]\n\nwinCount = homeWins[\"Home Team\"].count() + awayWins[\"Away Team\"].count()\n\nPenaltiesSeasonTotal = seasonReportRaw[\"tPEN(#)\"].sum()\nPenaltiesSeasonAverage = PenaltiesSeasonTotal / 16\nPenaltiesWinTotal = homeWins[\"tPEN(#)\"].sum() + awayWins[\"tPEN(#)\"].sum()\nPenaltiesWinAverage = PenaltiesWinTotal / winCount\nPenaltiesLossTotal = homeLosses[\"tPEN(#)\"].sum() + awayLosses[\"tPEN(#)\"].sum()\nPenaltiesLossAverage = PenaltiesLossTotal / (16-winCount)\n\nEPCSeasonTotal = seasonReportRaw[\"tEPPfP\"].sum()\nEPCSeasonAverage = EPCSeasonTotal / 16\nEPCWinTotal = homeWins[\"tEPPfP\"].sum() + awayWins[\"tEPPfP\"].sum()\nEPCWinAverage = EPCWinTotal / winCount\nEPCLossTotal = homeLosses[\"tEPPfP\"].sum() + awayLosses[\"tEPPfP\"].sum()\nEPCLossAverage = EPCLossTotal / (16-winCount)\n\nEPCDHPSeasonTotal = seasonReportRaw[\"tEPDHP\"].sum()\nEPCDHPSeasonAverage = EPCDHPSeasonTotal / 16\nEPCDHPWinTotal = homeWins[\"tEPDHP\"].sum() + awayWins[\"tEPDHP\"].sum()\nEPCDHPWinAverage = EPCDHPWinTotal / winCount\nEPCDHPLossTotal = homeLosses[\"tEPDHP\"].sum() + awayLosses[\"tEPDHP\"].sum()\nEPCDHPLossAverage = EPCDHPLossTotal / (16-winCount)\n\nEPCDEPSeasonTotal = seasonReportRaw[\"tEPDEP\"].sum()\nEPCDEPSeasonAverage = EPCDEPSeasonTotal / 16\nEPCDEPWinTotal = homeWins[\"tEPDEP\"].sum() + awayWins[\"tEPDEP\"].sum()\nEPCDEPWinAverage = EPCDEPWinTotal / winCount\nEPCDEPLossTotal = homeLosses[\"tEPDEP\"].sum() + awayLosses[\"tEPDEP\"].sum()\nEPCDEPLossAverage = EPCDEPLossTotal / (16-winCount)\n\nEPCOPSeasonTotal = seasonReportRaw[\"tEPDOP\"].sum()\nEPCOPSeasonAverage = EPCOPSeasonTotal / 16\nEPCOPWinTotal = homeWins[\"tEPDOP\"].sum() + awayWins[\"tEPDOP\"].sum()\nEPCOPWinAverage = EPCOPWinTotal / winCount\nEPCOPLossTotal = homeLosses[\"tEPDOP\"].sum() + awayLosses[\"tEPDOP\"].sum()\nEPCOPLossAverage = EPCOPLossTotal / (16-winCount)\n\nheaderRow = ['Season Total', 'Per Game', 'Win Total', 'Per Win', 'Loss Total','Per Loss']\npenaltiesRow = ['Penalties',PenaltiesSeasonTotal,PenaltiesSeasonAverage,PenaltiesWinTotal,PenaltiesWinAverage,PenaltiesLossTotal,PenaltiesLossAverage]\nEPCRow = ['EPC',EPCSeasonTotal,EPCSeasonAverage,EPCWinTotal,EPCWinAverage,EPCLossTotal,EPCLossAverage]\nEPCDHPRow = ['EPCDHP',EPCDHPSeasonTotal,EPCDHPSeasonAverage,EPCDHPWinTotal,EPCDHPWinAverage,EPCDHPLossTotal,EPCDHPLossAverage]\nEPCDEPRow = 
['EPCDEP',EPCDEPSeasonTotal,EPCDEPSeasonAverage,EPCDEPWinTotal,EPCDEPWinAverage,EPCDEPLossTotal,EPCDEPLossAverage]\nEPCOPRow = ['EPCOP',EPCOPSeasonTotal,EPCOPSeasonAverage,EPCOPWinTotal,EPCOPWinAverage,EPCOPLossTotal,EPCOPLossAverage]\n\nfig_background_color = 'white'\nfig_border = 'black'\ndata = [headerRow,penaltiesRow,EPCRow,EPCDHPRow,EPCDEPRow,EPCOPRow]\n\n# Pop the headers from the data array\ncolumn_headers = data.pop(0)\nrow_headers = [x.pop(0) for x in data]\n\n# Table data needs to be non-numeric text. Format the data\n# while I'm at it.\ncell_text = []\nfor row in data:\n cell_text.append([f'{x:1.2f}' for x in row])\n\n# Get some lists of color specs for row and column headers\nrcolors = plt.cm.BuPu(np.full(len(row_headers), 0.1))\nccolors = plt.cm.BuPu(np.full(len(column_headers), 0.1))\n\n# Create the figure. Setting a small pad on tight_layout\n# seems to better regulate white space. Sometimes experimenting\n# with an explicit figsize here can produce better outcome.\nplt.figure(linewidth=2,\n edgecolor=fig_border,\n facecolor=fig_background_color,\n tight_layout={'pad':1},\n figsize=(4.5,1.75)\n )\n\n# Add a table at the bottom of the axes\nthe_table = plt.table(cellText=cell_text,\n rowLabels=row_headers,\n rowColours=rcolors,\n rowLoc='right',\n colColours=ccolors,\n colLabels=column_headers,\n loc='center')\n\n# Scaling is the only influence we have over top and bottom cell padding.\n# Make the rows taller (i.e., make cell y scale larger).\nthe_table.scale(1, 1.1)\n# Hide axes\nax = plt.gca()\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)\n# Hide axes border\nplt.box(on=None)\n# Force the figure to update, so backends center objects correctly within the figure.\n# Without plt.draw() here, the title will center on the axes and not the figure.\nplt.draw()\n# Create image. plt.savefig ignores figure edge and face colors, so map them.\nfig = plt.gcf()\nplt.savefig('pyplot-table-demo.png',\n edgecolor=fig.get_edgecolor(),\n facecolor=fig.get_facecolor(),\n dpi=175\n )",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def intersection(set_a, set_b):
res = [i for i in set_a if i in set_b]
return res
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def recursiveUnioniser(set):
if isinstance(set[0], int):
return set
res = []
for i in range(len(set)):
for j in range(len(set[i])):
res.append(set[i][j])
if isinstance(res[0], list):
return recursiveUnioniser(res)
else:
return res
<|reserved_special_token_0|>
def mutualexclusion(set_a, set_b):
res = [i for i in set_a if i not in set_b]
res2 = [i for i in set_b if i not in set_a]
res += res2
return res
<|reserved_special_token_0|>
def intersection(set_a, set_b):
res = [i for i in set_a if i in set_b]
return res
<|reserved_special_token_0|>
def repetitionAudit(set):
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def recursiveUnioniser(set):
if isinstance(set[0], int):
return set
res = []
for i in range(len(set)):
for j in range(len(set[i])):
res.append(set[i][j])
if isinstance(res[0], list):
return recursiveUnioniser(res)
else:
return res
print(recursiveUnioniser(trial))
def mutualexclusion(set_a, set_b):
res = [i for i in set_a if i not in set_b]
res2 = [i for i in set_b if i not in set_a]
res += res2
return res
print(mutualexclusion(trial, trial2))
def intersection(set_a, set_b):
res = [i for i in set_a if i in set_b]
return res
print(intersection(trial, trial2))
def repetitionAudit(set):
pass
<|reserved_special_token_1|>
trial = [1, 2, 3]
trial2 = [3, 4, 5]
def recursiveUnioniser(set):
if isinstance(set[0], int):
return set
res = []
for i in range(len(set)):
for j in range(len(set[i])):
res.append(set[i][j])
if isinstance(res[0], list):
return recursiveUnioniser(res)
else:
return res
print(recursiveUnioniser(trial))
def mutualexclusion(set_a, set_b):
res = [i for i in set_a if i not in set_b]
res2 = [i for i in set_b if i not in set_a]
res += res2
return res
print(mutualexclusion(trial, trial2))
def intersection(set_a, set_b):
res = [i for i in set_a if i in set_b]
return res
print(intersection(trial, trial2))
def repetitionAudit(set):
pass
<|reserved_special_token_1|>
#This is a module which implements Naive Set Theory in Python.
#It will be useful for Unions, Intersections, Mutual Exclusion, and more.
#ideas: print(sum([[[1],[2]], [[3],[4]], [[5],[6]]], [])) Monoid - abstraction on +
trial = [1, 2, 3]
trial2 = [3, 4, 5]
def recursiveUnioniser(set):
if isinstance(set[0], int): return set
res = []
for i in range(len(set)):
for j in range(len(set[i])):
res.append(set[i][j])
if isinstance(res[0], list):
return recursiveUnioniser(res)
else: return res
print(recursiveUnioniser(trial))
def mutualexclusion(set_a, set_b):
res = [i for i in set_a if i not in set_b]
res2 = [i for i in set_b if i not in set_a]
res += res2
return res
print(mutualexclusion(trial, trial2))
def intersection(set_a, set_b):
res = [i for i in set_a if i in set_b]
return res
print(intersection(trial, trial2))
def repetitionAudit(set):
pass #this will audit a list to see if an element occurs more than once
#If it does, it will remove this element and return the list
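# Hedged sketch of what repetitionAudit could look like (the stub above only
# documents the intent): keep the first occurrence of each element.
def repetition_audit_sketch(seq):
    deduped = []
    for item in seq:
        if item not in deduped:
            deduped.append(item)
    return deduped

print(repetition_audit_sketch([1, 2, 2, 3, 1]))  # [1, 2, 3]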
|
flexible
|
{
"blob_id": "c632c50028fee2f19fb65458f0b55ec228b8006f",
"index": 2137,
"step-1": "<mask token>\n\n\ndef intersection(set_a, set_b):\n res = [i for i in set_a if i in set_b]\n return res\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef recursiveUnioniser(set):\n if isinstance(set[0], int):\n return set\n res = []\n for i in range(len(set)):\n for j in range(len(set[i])):\n res.append(set[i][j])\n if isinstance(res[0], list):\n return recursiveUnioniser(res)\n else:\n return res\n\n\n<mask token>\n\n\ndef mutualexclusion(set_a, set_b):\n res = [i for i in set_a if i not in set_b]\n res2 = [i for i in set_b if i not in set_a]\n res += res2\n return res\n\n\n<mask token>\n\n\ndef intersection(set_a, set_b):\n res = [i for i in set_a if i in set_b]\n return res\n\n\n<mask token>\n\n\ndef repetitionAudit(set):\n pass\n",
"step-3": "<mask token>\n\n\ndef recursiveUnioniser(set):\n if isinstance(set[0], int):\n return set\n res = []\n for i in range(len(set)):\n for j in range(len(set[i])):\n res.append(set[i][j])\n if isinstance(res[0], list):\n return recursiveUnioniser(res)\n else:\n return res\n\n\nprint(recursiveUnioniser(trial))\n\n\ndef mutualexclusion(set_a, set_b):\n res = [i for i in set_a if i not in set_b]\n res2 = [i for i in set_b if i not in set_a]\n res += res2\n return res\n\n\nprint(mutualexclusion(trial, trial2))\n\n\ndef intersection(set_a, set_b):\n res = [i for i in set_a if i in set_b]\n return res\n\n\nprint(intersection(trial, trial2))\n\n\ndef repetitionAudit(set):\n pass\n",
"step-4": "trial = [1, 2, 3]\ntrial2 = [3, 4, 5]\n\n\ndef recursiveUnioniser(set):\n if isinstance(set[0], int):\n return set\n res = []\n for i in range(len(set)):\n for j in range(len(set[i])):\n res.append(set[i][j])\n if isinstance(res[0], list):\n return recursiveUnioniser(res)\n else:\n return res\n\n\nprint(recursiveUnioniser(trial))\n\n\ndef mutualexclusion(set_a, set_b):\n res = [i for i in set_a if i not in set_b]\n res2 = [i for i in set_b if i not in set_a]\n res += res2\n return res\n\n\nprint(mutualexclusion(trial, trial2))\n\n\ndef intersection(set_a, set_b):\n res = [i for i in set_a if i in set_b]\n return res\n\n\nprint(intersection(trial, trial2))\n\n\ndef repetitionAudit(set):\n pass\n",
"step-5": "#This is a module which implements Naive Set Theory in Python.\n#It will be useful for Unions, Intersections, Mutual Exclusion, and more.\n#ideas: print(sum([[[1],[2]], [[3],[4]], [[5],[6]]], [])) Monoid - abstraction on +\n\n\ntrial = [1, 2, 3]\ntrial2 = [3, 4, 5]\n\ndef recursiveUnioniser(set):\n if isinstance(set[0], int): return set\n\n res = []\n\n for i in range(len(set)):\n for j in range(len(set[i])):\n res.append(set[i][j])\n\n if isinstance(res[0], list):\n return recursiveUnioniser(res)\n else: return res\n\nprint(recursiveUnioniser(trial))\n\ndef mutualexclusion(set_a, set_b):\n res = [i for i in set_a if i not in set_b]\n res2 = [i for i in set_b if i not in set_a]\n res += res2\n\n return res\n\nprint(mutualexclusion(trial, trial2))\n\ndef intersection(set_a, set_b):\n res = [i for i in set_a if i in set_b]\n\n return res\n\nprint(intersection(trial, trial2))\n\ndef repetitionAudit(set):\n pass #this will audit a list to see if an element occurs more than once\n #If it does, it will remove this element and return the list\n",
"step-ids": [
1,
4,
5,
6,
7
]
}
|
[
1,
4,
5,
6,
7
] |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2018 foree <foree@foree-pc>
#
# Distributed under terms of the MIT license.
"""
Basic logging configuration (file handler at DEBUG, stdout handler at WARN).
"""
import logging
import sys
import os
from common.common import get_root_path
FILE_LEVEL = logging.DEBUG
STREAM_LEVEL = logging.WARN
LOG_DIR = os.path.join(get_root_path(), 'log')
PATH_LOG = os.path.join(get_root_path(), 'log/advanced_build_kit.log')
if not os.path.exists(LOG_DIR):
os.mkdir(LOG_DIR)
if not os.path.exists(PATH_LOG):
f = open(PATH_LOG, 'w')
f.write('')
f.close()
# create logger
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# create formatter
message_fmt = "%(asctime)s %(process)d/%(filename)s %(levelname)s/%(funcName)s(%(lineno)d): %(message)s"
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=message_fmt, datefmt=datefmt)
# create file handler
fh = logging.FileHandler(PATH_LOG)
fh.setLevel(FILE_LEVEL)
fh.setFormatter(formatter)
logger.addHandler(fh)
# create stdout handler
sh = logging.StreamHandler(stream=sys.stdout)
sh.setLevel(STREAM_LEVEL)
sh.setFormatter(formatter)
logger.addHandler(sh)
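# Illustrative usage (an addition): with the handlers configured above, DEBUG
# messages reach only the log file, while WARNING and above also hit stdout.
if __name__ == '__main__':
    logger.debug('written to the log file only')
    logger.warning('written to the log file and to stdout')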
|
normal
|
{
"blob_id": "96910e9b6861fc9af0db3a3130d898fd1ee3daad",
"index": 3356,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif not os.path.exists(LOG_DIR):\n os.mkdir(LOG_DIR)\nif not os.path.exists(PATH_LOG):\n f = open(PATH_LOG, 'w')\n f.write('')\n f.close()\n<mask token>\nlogger.setLevel(logging.DEBUG)\n<mask token>\nfh.setLevel(FILE_LEVEL)\nfh.setFormatter(formatter)\nlogger.addHandler(fh)\n<mask token>\nsh.setLevel(STREAM_LEVEL)\nsh.setFormatter(formatter)\nlogger.addHandler(sh)\n",
"step-3": "<mask token>\nFILE_LEVEL = logging.DEBUG\nSTREAM_LEVEL = logging.WARN\nLOG_DIR = os.path.join(get_root_path(), 'log')\nPATH_LOG = os.path.join(get_root_path(), 'log/advanced_build_kit.log')\nif not os.path.exists(LOG_DIR):\n os.mkdir(LOG_DIR)\nif not os.path.exists(PATH_LOG):\n f = open(PATH_LOG, 'w')\n f.write('')\n f.close()\nlogger = logging.getLogger()\nlogger.setLevel(logging.DEBUG)\nmessage_fmt = (\n '%(asctime)s %(process)d/%(filename)s %(levelname)s/%(funcName)s(%(lineno)d): %(message)s'\n )\ndatefmt = '%Y-%m-%d %H:%M:%S'\nformatter = logging.Formatter(fmt=message_fmt, datefmt=datefmt)\nfh = logging.FileHandler(PATH_LOG)\nfh.setLevel(FILE_LEVEL)\nfh.setFormatter(formatter)\nlogger.addHandler(fh)\nsh = logging.StreamHandler(stream=sys.stdout)\nsh.setLevel(STREAM_LEVEL)\nsh.setFormatter(formatter)\nlogger.addHandler(sh)\n",
"step-4": "<mask token>\nimport logging\nimport sys\nimport os\nfrom common.common import get_root_path\nFILE_LEVEL = logging.DEBUG\nSTREAM_LEVEL = logging.WARN\nLOG_DIR = os.path.join(get_root_path(), 'log')\nPATH_LOG = os.path.join(get_root_path(), 'log/advanced_build_kit.log')\nif not os.path.exists(LOG_DIR):\n os.mkdir(LOG_DIR)\nif not os.path.exists(PATH_LOG):\n f = open(PATH_LOG, 'w')\n f.write('')\n f.close()\nlogger = logging.getLogger()\nlogger.setLevel(logging.DEBUG)\nmessage_fmt = (\n '%(asctime)s %(process)d/%(filename)s %(levelname)s/%(funcName)s(%(lineno)d): %(message)s'\n )\ndatefmt = '%Y-%m-%d %H:%M:%S'\nformatter = logging.Formatter(fmt=message_fmt, datefmt=datefmt)\nfh = logging.FileHandler(PATH_LOG)\nfh.setLevel(FILE_LEVEL)\nfh.setFormatter(formatter)\nlogger.addHandler(fh)\nsh = logging.StreamHandler(stream=sys.stdout)\nsh.setLevel(STREAM_LEVEL)\nsh.setFormatter(formatter)\nlogger.addHandler(sh)\n",
"step-5": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n#\n# Copyright © 2018 foree <foree@foree-pc>\n#\n# Distributed under terms of the MIT license.\n\n\"\"\"\n配置logging的基本配置\n\"\"\"\nimport logging\nimport sys\nimport os\nfrom common.common import get_root_path\n\n\nFILE_LEVEL = logging.DEBUG\nSTREAM_LEVEL = logging.WARN\n\nLOG_DIR = os.path.join(get_root_path(), 'log')\nPATH_LOG = os.path.join(get_root_path(), 'log/advanced_build_kit.log')\n\nif not os.path.exists(LOG_DIR):\n os.mkdir(LOG_DIR)\n\nif not os.path.exists(PATH_LOG):\n f = open(PATH_LOG, 'w')\n f.write('')\n f.close()\n\n# create logger\nlogger = logging.getLogger()\nlogger.setLevel(logging.DEBUG)\n\n# create formatter\nmessage_fmt = \"%(asctime)s %(process)d/%(filename)s %(levelname)s/%(funcName)s(%(lineno)d): %(message)s\"\ndatefmt = \"%Y-%m-%d %H:%M:%S\"\nformatter = logging.Formatter(fmt=message_fmt, datefmt=datefmt)\n\n# create file handler\nfh = logging.FileHandler(PATH_LOG)\nfh.setLevel(FILE_LEVEL)\nfh.setFormatter(formatter)\nlogger.addHandler(fh)\n\n# create stdout handler\nsh = logging.StreamHandler(stream=sys.stdout)\nsh.setLevel(STREAM_LEVEL)\nsh.setFormatter(formatter)\nlogger.addHandler(sh)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.insert(0, parentdir)
<|reserved_special_token_0|>
s2.setTitle('二叉树——递归套路')
<|reserved_special_token_0|>
r2.setTitle('二叉树——递归套路')
<|reserved_special_token_0|>
xmind.build(content, r2)
xmind.save(w, os.path.dirname(os.path.abspath(__file__)) + '\\' +
xmind_name + '.xmind')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, parentdir)
<|reserved_special_token_0|>
xmind_name = '数据结构'
w = xmind.load(os.path.dirname(os.path.abspath(__file__)) + '\\' +
xmind_name + '.xmind')
s2 = w.createSheet()
s2.setTitle('二叉树——递归套路')
r2 = s2.getRootTopic()
r2.setTitle('二叉树——递归套路')
content = {'递归套路': ['可解决面试中绝大多数二叉树问题,尤其是树型dp问题', '本质是利用递归遍历二叉树的便利性'], '思路':
['1.假设以x节点为为头,假设可以向X左树和X右树要任何信息', '2.在上一步的假设下,讨论以X为头节点的树,得到答案的可能性(最重要)',
'3.列出所有可能性后,确定到底需要向左树还是右树要什么样的信息', '4.把左树信息和右树信息求全集,就是任何一棵子树都需要返回的信息S',
'5.递归函数都返回S,每一棵子树都这么要求', '6.写代码,在代码中考虑如何把左树信息和右树信息整合出整棵树的信息'], '题目1': [
'给定一棵二叉树的头节点head,返回这颗二叉树是不是平衡二叉树', {'思路': ['1.左子树是否平衡', '2.右子树是否平衡',
'3.左树与右树高在2以内']}, {'实现': ['Class Info(){', ' boolean isBalanced;',
' int height;', '}', '---------------------',
'Info process(Node head){', ' if(node==null){', ' return node;',
' }', ' Info leftInfo=process(head.left);',
' Info rightInfo=process(head.right);',
' int height=Math.max(leftInfo.height,rightInfo.height)-1;',
' boolean isBalanced=true;',
' if(leftInfo.isBalanced&&rightInfo.isBalanced&&Math.abs(leftInfo.height-rightInfo.height)<2){'
, ' isBalanced=false;', ' }',
' return new Info(isBalanced,height);', '}']}], '题目2': [
'给定一棵二叉树的头节点head,任何两个节点之前都存在距离', '返回整棵二叉树的最大距离', {'思路': [{'1.与头节点无关': [
'max(左侧的最大距离,右侧的最大距离)']}, {'2.与头节点有头': ['左树高+右树高+1']}]}, {'实现': [
'Class Info(){', ' int maxDistance;', ' int height;', '}',
'---------------------', 'Info process(Node head){',
' if(head==null){', ' return new Info(0,0);', ' }',
' Info leftInfo=process(head.left);',
' Info rightInfo=process(head.right);',
' int height=Math.max(leftInfo.height,rightInfo.height)+1;',
' int maxDistance=Math.max(',
' Math.max(leftInfo.maxDistance,rightInfo.maxDistance),',
' leftInfo.height+rightInfo.height+1)',
' return new Info(maxDistance,height);', '}']}]}
xmind.build(content, r2)
xmind.save(w, os.path.dirname(os.path.abspath(__file__)) + '\\' +
xmind_name + '.xmind')
<|reserved_special_token_1|>
import os, sys
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, parentdir)
import xmind
from xmind.core.markerref import MarkerId
xmind_name = '数据结构'
w = xmind.load(os.path.dirname(os.path.abspath(__file__)) + '\\' +
xmind_name + '.xmind')
s2 = w.createSheet()
s2.setTitle('二叉树——递归套路')
r2 = s2.getRootTopic()
r2.setTitle('二叉树——递归套路')
content = {'递归套路': ['可解决面试中绝大多数二叉树问题,尤其是树型dp问题', '本质是利用递归遍历二叉树的便利性'], '思路':
['1.假设以x节点为为头,假设可以向X左树和X右树要任何信息', '2.在上一步的假设下,讨论以X为头节点的树,得到答案的可能性(最重要)',
'3.列出所有可能性后,确定到底需要向左树还是右树要什么样的信息', '4.把左树信息和右树信息求全集,就是任何一棵子树都需要返回的信息S',
'5.递归函数都返回S,每一棵子树都这么要求', '6.写代码,在代码中考虑如何把左树信息和右树信息整合出整棵树的信息'], '题目1': [
'给定一棵二叉树的头节点head,返回这颗二叉树是不是平衡二叉树', {'思路': ['1.左子树是否平衡', '2.右子树是否平衡',
'3.左树与右树高在2以内']}, {'实现': ['Class Info(){', ' boolean isBalanced;',
' int height;', '}', '---------------------',
'Info process(Node head){', ' if(node==null){', ' return node;',
' }', ' Info leftInfo=process(head.left);',
' Info rightInfo=process(head.right);',
' int height=Math.max(leftInfo.height,rightInfo.height)-1;',
' boolean isBalanced=true;',
' if(leftInfo.isBalanced&&rightInfo.isBalanced&&Math.abs(leftInfo.height-rightInfo.height)<2){'
, ' isBalanced=false;', ' }',
' return new Info(isBalanced,height);', '}']}], '题目2': [
'给定一棵二叉树的头节点head,任何两个节点之前都存在距离', '返回整棵二叉树的最大距离', {'思路': [{'1.与头节点无关': [
'max(左侧的最大距离,右侧的最大距离)']}, {'2.与头节点有头': ['左树高+右树高+1']}]}, {'实现': [
'Class Info(){', ' int maxDistance;', ' int height;', '}',
'---------------------', 'Info process(Node head){',
' if(head==null){', ' return new Info(0,0);', ' }',
' Info leftInfo=process(head.left);',
' Info rightInfo=process(head.right);',
' int height=Math.max(leftInfo.height,rightInfo.height)+1;',
' int maxDistance=Math.max(',
' Math.max(leftInfo.maxDistance,rightInfo.maxDistance),',
' leftInfo.height+rightInfo.height+1)',
' return new Info(maxDistance,height);', '}']}]}
xmind.build(content, r2)
xmind.save(w, os.path.dirname(os.path.abspath(__file__)) + '\\' +
xmind_name + '.xmind')
<|reserved_special_token_1|>
import os,sys
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,parentdir)
import xmind
from xmind.core.markerref import MarkerId
xmind_name="数据结构"
w = xmind.load(os.path.dirname(os.path.abspath(__file__))+"\\"+xmind_name+".xmind")
s2=w.createSheet()
s2.setTitle("二叉树——递归套路")
r2=s2.getRootTopic()
r2.setTitle("二叉树——递归套路")
content={
'递归套路':[
'可解决面试中绝大多数二叉树问题,尤其是树型dp问题',
'本质是利用递归遍历二叉树的便利性'
],
'思路':[
'1.假设以x节点为为头,假设可以向X左树和X右树要任何信息',
'2.在上一步的假设下,讨论以X为头节点的树,得到答案的可能性(最重要)',
'3.列出所有可能性后,确定到底需要向左树还是右树要什么样的信息',
'4.把左树信息和右树信息求全集,就是任何一棵子树都需要返回的信息S',
'5.递归函数都返回S,每一棵子树都这么要求',
'6.写代码,在代码中考虑如何把左树信息和右树信息整合出整棵树的信息'
],
'题目1':[
'给定一棵二叉树的头节点head,返回这颗二叉树是不是平衡二叉树',
{'思路':[
'1.左子树是否平衡',
'2.右子树是否平衡',
'3.左树与右树高在2以内',
]},
{'实现':[
'Class Info(){',
' boolean isBalanced;',
' int height;',
'}',
'---------------------',
'Info process(Node head){',
' if(node==null){',
' return node;',
' }',
' Info leftInfo=process(head.left);',
' Info rightInfo=process(head.right);',
' int height=Math.max(leftInfo.height,rightInfo.height)-1;',
' boolean isBalanced=true;',
' if(leftInfo.isBalanced&&rightInfo.isBalanced&&Math.abs(leftInfo.height-rightInfo.height)<2){',
' isBalanced=false;',
' }',
' return new Info(isBalanced,height);',
'}'
]}
],
'题目2':[
'给定一棵二叉树的头节点head,任何两个节点之前都存在距离',
'返回整棵二叉树的最大距离',
{'思路':[
{'1.与头节点无关':[
'max(左侧的最大距离,右侧的最大距离)',
]},
{'2.与头节点有头':[
'左树高+右树高+1'
]}
]},
{'实现':[
'Class Info(){',
' int maxDistance;',
' int height;',
'}',
'---------------------',
'Info process(Node head){',
' if(head==null){',
' return new Info(0,0);',
' }',
' Info leftInfo=process(head.left);',
' Info rightInfo=process(head.right);',
' int height=Math.max(leftInfo.height,rightInfo.height)+1;',
' int maxDistance=Math.max(',
' Math.max(leftInfo.maxDistance,rightInfo.maxDistance),',
' leftInfo.height+rightInfo.height+1)',
' return new Info(maxDistance,height);',
'}'
]}
]
}
# build the xmind content tree
xmind.build(content,r2)
# save the xmind file
xmind.save(w,os.path.dirname(os.path.abspath(__file__))+"\\"+xmind_name+".xmind")
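# Illustrative companion sketch (not part of the xmind data above): a working
# Python version of the "recursion pattern" the map describes, applied to the
# balanced-binary-tree check. Note the embedded Java sketch assigns isBalanced
# the wrong way round inside its if-block; the version below computes it directly.
# Node is assumed to expose .left and .right attributes.
class _Info:
    def __init__(self, is_balanced, height):
        self.is_balanced = is_balanced
        self.height = height

def _process(node):
    if node is None:
        return _Info(True, 0)
    left = _process(node.left)
    right = _process(node.right)
    height = max(left.height, right.height) + 1
    is_balanced = (left.is_balanced and right.is_balanced
                   and abs(left.height - right.height) < 2)
    return _Info(is_balanced, height)

def is_balanced_tree(head):
    return _process(head).is_balanced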
|
flexible
|
{
"blob_id": "b713e38824db13f919484b071fb35afb29e26baa",
"index": 3803,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsys.path.insert(0, parentdir)\n<mask token>\ns2.setTitle('二叉树——递归套路')\n<mask token>\nr2.setTitle('二叉树——递归套路')\n<mask token>\nxmind.build(content, r2)\nxmind.save(w, os.path.dirname(os.path.abspath(__file__)) + '\\\\' +\n xmind_name + '.xmind')\n",
"step-3": "<mask token>\nparentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.insert(0, parentdir)\n<mask token>\nxmind_name = '数据结构'\nw = xmind.load(os.path.dirname(os.path.abspath(__file__)) + '\\\\' +\n xmind_name + '.xmind')\ns2 = w.createSheet()\ns2.setTitle('二叉树——递归套路')\nr2 = s2.getRootTopic()\nr2.setTitle('二叉树——递归套路')\ncontent = {'递归套路': ['可解决面试中绝大多数二叉树问题,尤其是树型dp问题', '本质是利用递归遍历二叉树的便利性'], '思路':\n ['1.假设以x节点为为头,假设可以向X左树和X右树要任何信息', '2.在上一步的假设下,讨论以X为头节点的树,得到答案的可能性(最重要)',\n '3.列出所有可能性后,确定到底需要向左树还是右树要什么样的信息', '4.把左树信息和右树信息求全集,就是任何一棵子树都需要返回的信息S',\n '5.递归函数都返回S,每一棵子树都这么要求', '6.写代码,在代码中考虑如何把左树信息和右树信息整合出整棵树的信息'], '题目1': [\n '给定一棵二叉树的头节点head,返回这颗二叉树是不是平衡二叉树', {'思路': ['1.左子树是否平衡', '2.右子树是否平衡',\n '3.左树与右树高在2以内']}, {'实现': ['Class Info(){', ' boolean isBalanced;',\n ' int height;', '}', '---------------------',\n 'Info process(Node head){', ' if(node==null){', ' return node;',\n ' }', ' Info leftInfo=process(head.left);',\n ' Info rightInfo=process(head.right);',\n ' int height=Math.max(leftInfo.height,rightInfo.height)-1;',\n ' boolean isBalanced=true;',\n ' if(leftInfo.isBalanced&&rightInfo.isBalanced&&Math.abs(leftInfo.height-rightInfo.height)<2){'\n , ' isBalanced=false;', ' }',\n ' return new Info(isBalanced,height);', '}']}], '题目2': [\n '给定一棵二叉树的头节点head,任何两个节点之前都存在距离', '返回整棵二叉树的最大距离', {'思路': [{'1.与头节点无关': [\n 'max(左侧的最大距离,右侧的最大距离)']}, {'2.与头节点有头': ['左树高+右树高+1']}]}, {'实现': [\n 'Class Info(){', ' int maxDistance;', ' int height;', '}',\n '---------------------', 'Info process(Node head){',\n ' if(head==null){', ' return new Info(0,0);', ' }',\n ' Info leftInfo=process(head.left);',\n ' Info rightInfo=process(head.right);',\n ' int height=Math.max(leftInfo.height,rightInfo.height)+1;',\n ' int maxDistance=Math.max(',\n ' Math.max(leftInfo.maxDistance,rightInfo.maxDistance),',\n ' leftInfo.height+rightInfo.height+1)',\n ' return new Info(maxDistance,height);', '}']}]}\nxmind.build(content, r2)\nxmind.save(w, os.path.dirname(os.path.abspath(__file__)) + '\\\\' +\n xmind_name + '.xmind')\n",
"step-4": "import os, sys\nparentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.insert(0, parentdir)\nimport xmind\nfrom xmind.core.markerref import MarkerId\nxmind_name = '数据结构'\nw = xmind.load(os.path.dirname(os.path.abspath(__file__)) + '\\\\' +\n xmind_name + '.xmind')\ns2 = w.createSheet()\ns2.setTitle('二叉树——递归套路')\nr2 = s2.getRootTopic()\nr2.setTitle('二叉树——递归套路')\ncontent = {'递归套路': ['可解决面试中绝大多数二叉树问题,尤其是树型dp问题', '本质是利用递归遍历二叉树的便利性'], '思路':\n ['1.假设以x节点为为头,假设可以向X左树和X右树要任何信息', '2.在上一步的假设下,讨论以X为头节点的树,得到答案的可能性(最重要)',\n '3.列出所有可能性后,确定到底需要向左树还是右树要什么样的信息', '4.把左树信息和右树信息求全集,就是任何一棵子树都需要返回的信息S',\n '5.递归函数都返回S,每一棵子树都这么要求', '6.写代码,在代码中考虑如何把左树信息和右树信息整合出整棵树的信息'], '题目1': [\n '给定一棵二叉树的头节点head,返回这颗二叉树是不是平衡二叉树', {'思路': ['1.左子树是否平衡', '2.右子树是否平衡',\n '3.左树与右树高在2以内']}, {'实现': ['Class Info(){', ' boolean isBalanced;',\n ' int height;', '}', '---------------------',\n 'Info process(Node head){', ' if(node==null){', ' return node;',\n ' }', ' Info leftInfo=process(head.left);',\n ' Info rightInfo=process(head.right);',\n ' int height=Math.max(leftInfo.height,rightInfo.height)-1;',\n ' boolean isBalanced=true;',\n ' if(leftInfo.isBalanced&&rightInfo.isBalanced&&Math.abs(leftInfo.height-rightInfo.height)<2){'\n , ' isBalanced=false;', ' }',\n ' return new Info(isBalanced,height);', '}']}], '题目2': [\n '给定一棵二叉树的头节点head,任何两个节点之前都存在距离', '返回整棵二叉树的最大距离', {'思路': [{'1.与头节点无关': [\n 'max(左侧的最大距离,右侧的最大距离)']}, {'2.与头节点有头': ['左树高+右树高+1']}]}, {'实现': [\n 'Class Info(){', ' int maxDistance;', ' int height;', '}',\n '---------------------', 'Info process(Node head){',\n ' if(head==null){', ' return new Info(0,0);', ' }',\n ' Info leftInfo=process(head.left);',\n ' Info rightInfo=process(head.right);',\n ' int height=Math.max(leftInfo.height,rightInfo.height)+1;',\n ' int maxDistance=Math.max(',\n ' Math.max(leftInfo.maxDistance,rightInfo.maxDistance),',\n ' leftInfo.height+rightInfo.height+1)',\n ' return new Info(maxDistance,height);', '}']}]}\nxmind.build(content, r2)\nxmind.save(w, os.path.dirname(os.path.abspath(__file__)) + '\\\\' +\n xmind_name + '.xmind')\n",
"step-5": "import os,sys \nparentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) \nsys.path.insert(0,parentdir) \n\nimport xmind\nfrom xmind.core.markerref import MarkerId\nxmind_name=\"数据结构\"\nw = xmind.load(os.path.dirname(os.path.abspath(__file__))+\"\\\\\"+xmind_name+\".xmind\") \ns2=w.createSheet()\ns2.setTitle(\"二叉树——递归套路\")\nr2=s2.getRootTopic()\nr2.setTitle(\"二叉树——递归套路\")\n\n\ncontent={\n'递归套路':[\n '可解决面试中绝大多数二叉树问题,尤其是树型dp问题',\n '本质是利用递归遍历二叉树的便利性'\n],\n'思路':[\n '1.假设以x节点为为头,假设可以向X左树和X右树要任何信息',\n '2.在上一步的假设下,讨论以X为头节点的树,得到答案的可能性(最重要)',\n '3.列出所有可能性后,确定到底需要向左树还是右树要什么样的信息',\n '4.把左树信息和右树信息求全集,就是任何一棵子树都需要返回的信息S',\n '5.递归函数都返回S,每一棵子树都这么要求',\n '6.写代码,在代码中考虑如何把左树信息和右树信息整合出整棵树的信息'\n],\n'题目1':[\n '给定一棵二叉树的头节点head,返回这颗二叉树是不是平衡二叉树',\n {'思路':[\n '1.左子树是否平衡',\n '2.右子树是否平衡',\n '3.左树与右树高在2以内',\n ]},\n {'实现':[\n 'Class Info(){',\n ' boolean isBalanced;',\n ' int height;',\n '}',\n '---------------------',\n 'Info process(Node head){',\n ' if(node==null){',\n ' return node;',\n ' }',\n ' Info leftInfo=process(head.left);',\n ' Info rightInfo=process(head.right);',\n ' int height=Math.max(leftInfo.height,rightInfo.height)-1;',\n ' boolean isBalanced=true;',\n ' if(leftInfo.isBalanced&&rightInfo.isBalanced&&Math.abs(leftInfo.height-rightInfo.height)<2){',\n ' isBalanced=false;',\n ' }',\n ' return new Info(isBalanced,height);',\n '}'\n ]}\n],\n'题目2':[\n '给定一棵二叉树的头节点head,任何两个节点之前都存在距离',\n '返回整棵二叉树的最大距离',\n {'思路':[\n {'1.与头节点无关':[\n 'max(左侧的最大距离,右侧的最大距离)',\n ]},\n {'2.与头节点有头':[\n '左树高+右树高+1'\n ]}\n ]},\n {'实现':[\n 'Class Info(){',\n ' int maxDistance;',\n ' int height;',\n '}',\n '---------------------',\n 'Info process(Node head){',\n ' if(head==null){',\n ' return new Info(0,0);',\n ' }',\n ' Info leftInfo=process(head.left);',\n ' Info rightInfo=process(head.right);',\n ' int height=Math.max(leftInfo.height,rightInfo.height)+1;',\n ' int maxDistance=Math.max(',\n ' Math.max(leftInfo.maxDistance,rightInfo.maxDistance),',\n ' leftInfo.height+rightInfo.height+1)',\n ' return new Info(maxDistance,height);',\n '}'\n ]}\n \n]\n\n}\n#构建xmind\nxmind.build(content,r2)\n#保存xmind\nxmind.save(w,os.path.dirname(os.path.abspath(__file__))+\"\\\\\"+xmind_name+\".xmind\") ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
class Config(object):
DEBUG = False
TESTING = False
class ProductionConfig(Config):
CORS_ALLOWED_ORIGINS = "productionexample.com"
class DevelopmentConfig(Config):
DEBUG = True
CORS_ALLOWED_ORIGINS = "developmentexample.com"
class TestingConfig(Config):
TESTING = True
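# Hedged usage sketch (assumption: these classes are consumed by a Flask app
# factory; create_app and the Flask import are illustrative, not from the record):
def create_app(config_object=DevelopmentConfig):
    from flask import Flask
    app = Flask(__name__)
    app.config.from_object(config_object)  # DEBUG, TESTING and CORS_ALLOWED_ORIGINS land in app.config
    return app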
|
normal
|
{
"blob_id": "b76c868a29b5edd07d0da60b1a13ddb4ac3e2913",
"index": 6988,
"step-1": "<mask token>\n\n\nclass DevelopmentConfig(Config):\n DEBUG = True\n CORS_ALLOWED_ORIGINS = 'developmentexample.com'\n\n\nclass TestingConfig(Config):\n TESTING = True\n",
"step-2": "<mask token>\n\n\nclass ProductionConfig(Config):\n <mask token>\n\n\nclass DevelopmentConfig(Config):\n DEBUG = True\n CORS_ALLOWED_ORIGINS = 'developmentexample.com'\n\n\nclass TestingConfig(Config):\n TESTING = True\n",
"step-3": "class Config(object):\n <mask token>\n <mask token>\n\n\nclass ProductionConfig(Config):\n CORS_ALLOWED_ORIGINS = 'productionexample.com'\n\n\nclass DevelopmentConfig(Config):\n DEBUG = True\n CORS_ALLOWED_ORIGINS = 'developmentexample.com'\n\n\nclass TestingConfig(Config):\n TESTING = True\n",
"step-4": "class Config(object):\n DEBUG = False\n TESTING = False\n\n\nclass ProductionConfig(Config):\n CORS_ALLOWED_ORIGINS = 'productionexample.com'\n\n\nclass DevelopmentConfig(Config):\n DEBUG = True\n CORS_ALLOWED_ORIGINS = 'developmentexample.com'\n\n\nclass TestingConfig(Config):\n TESTING = True\n",
"step-5": "class Config(object):\n DEBUG = False\n TESTING = False\n\n\nclass ProductionConfig(Config):\n CORS_ALLOWED_ORIGINS = \"productionexample.com\"\n\n\nclass DevelopmentConfig(Config):\n DEBUG = True\n CORS_ALLOWED_ORIGINS = \"developmentexample.com\"\n\n\nclass TestingConfig(Config):\n TESTING = True\n",
"step-ids": [
4,
5,
7,
8,
9
]
}
|
[
4,
5,
7,
8,
9
] |
<|reserved_special_token_0|>
def augment(components, augmentors, use_o=False):
"""
Augmenting of images.
:param components: components
:return: updated components.
"""
img_path = components[0]
height = components[1]
width = components[2]
center = components[3]
bbox = components[4]
area = components[5]
num_keypoints = components[6]
masks_segments = components[7]
scale = components[8]
all_joints = components[9]
img = components[10]
mask = components[11]
aug_center = components[12]
aug_joints = components[13]
idx = components[14]
meta = Meta(img_path, height, width, center, bbox, area, scale,
num_keypoints)
meta.masks_segments = masks_segments
meta.all_joints = all_joints
meta.img = img
meta.mask = mask
meta.aug_center = aug_center
meta.aug_joints = aug_joints
aug_center = meta.center.copy()
aug_joints = joints_to_point8(meta.all_joints)
if idx % 2 == 1:
o_meta = Meta(img_path, height, width, center, bbox, area, scale,
num_keypoints)
o_meta.all_joints = all_joints
o_meta.img = img
o_meta.mask = mask
o_meta.aug_center = aug_center
o_meta.aug_joints = aug_joints
o_aug_center = o_meta.center.copy()
o_aug_joints = joints_to_point8(o_meta.all_joints)
o_trans = augmentors[4].get_transform(AugImgMetadata(img=o_meta.img,
mask=o_meta.mask, center=o_aug_center, scale=o_meta.scale))
o_img, o_mask = o_trans.apply_image(o_meta)
o_aug_joints = o_trans.apply_coords(o_aug_joints)
o_meta.aug_joints = point8_to_joints(o_aug_joints)
return [o_img, o_meta.aug_joints]
else:
for aug in augmentors:
transformation = aug.get_transform(AugImgMetadata(img=meta.img,
mask=meta.mask, center=aug_center, scale=meta.scale))
im, mask = transformation.apply_image(meta)
aug_joints = transformation.apply_coords(aug_joints)
if isinstance(transformation, FlipTransform):
aug_joints = transformation.recover_left_right(aug_joints)
aug_center = transformation.apply_coords(aug_center)
meta.img = im
meta.mask = mask
meta.aug_joints = point8_to_joints(aug_joints)
meta.aug_center = aug_center
back_img = meta.img
back_aug_joints = meta.aug_joints
return [back_img, back_aug_joints]
<|reserved_special_token_0|>
def create_all_mask(mask, num, stride):
"""
Helper function to create a stack of scaled down mask.
:param mask: mask image
:param num: number of layers
:param stride: parameter used to scale down the mask image because it has
    the same size as the original image. We need the size of the network output.
:return:
"""
scale_factor = 1.0 / stride
small_mask = cv2.resize(mask, (0, 0), fx=scale_factor, fy=scale_factor,
interpolation=cv2.INTER_CUBIC)
small_mask = small_mask[:, :, np.newaxis]
return np.repeat(small_mask, num, axis=2)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def read_img(components):
"""
Loads image from meta.img_path. Assigns the image to
the field img of the same meta instance.
:param components: components
:return: updated components
"""
img_buf = open(components[0], 'rb').read()
if not img_buf:
raise Exception('image not read, path=%s' % components[0])
arr = np.fromstring(img_buf, np.uint8)
img = cv2.imdecode(arr, cv2.IMREAD_COLOR)
components[1], components[2] = img.shape[:2]
components[10] = img
return components
def gen_mask(components):
"""
Generate masks based on the coco mask polygons.
:param components: components
:return: updated components
"""
masks_segments = components[7]
hh = components[1]
ww = components[2]
if masks_segments:
mask_miss = np.ones((hh, ww), dtype=np.uint8)
for seg in masks_segments:
bin_mask = maskUtils.decode(seg)
bin_mask = np.logical_not(bin_mask)
mask_miss = np.bitwise_and(mask_miss, bin_mask)
components[11] = mask_miss
return components
def augment(components, augmentors, use_o=False):
"""
Augmenting of images.
:param components: components
:return: updated components.
"""
img_path = components[0]
height = components[1]
width = components[2]
center = components[3]
bbox = components[4]
area = components[5]
num_keypoints = components[6]
masks_segments = components[7]
scale = components[8]
all_joints = components[9]
img = components[10]
mask = components[11]
aug_center = components[12]
aug_joints = components[13]
idx = components[14]
meta = Meta(img_path, height, width, center, bbox, area, scale,
num_keypoints)
meta.masks_segments = masks_segments
meta.all_joints = all_joints
meta.img = img
meta.mask = mask
meta.aug_center = aug_center
meta.aug_joints = aug_joints
aug_center = meta.center.copy()
aug_joints = joints_to_point8(meta.all_joints)
if idx % 2 == 1:
o_meta = Meta(img_path, height, width, center, bbox, area, scale,
num_keypoints)
o_meta.all_joints = all_joints
o_meta.img = img
o_meta.mask = mask
o_meta.aug_center = aug_center
o_meta.aug_joints = aug_joints
o_aug_center = o_meta.center.copy()
o_aug_joints = joints_to_point8(o_meta.all_joints)
o_trans = augmentors[4].get_transform(AugImgMetadata(img=o_meta.img,
mask=o_meta.mask, center=o_aug_center, scale=o_meta.scale))
o_img, o_mask = o_trans.apply_image(o_meta)
o_aug_joints = o_trans.apply_coords(o_aug_joints)
o_meta.aug_joints = point8_to_joints(o_aug_joints)
return [o_img, o_meta.aug_joints]
else:
for aug in augmentors:
transformation = aug.get_transform(AugImgMetadata(img=meta.img,
mask=meta.mask, center=aug_center, scale=meta.scale))
im, mask = transformation.apply_image(meta)
aug_joints = transformation.apply_coords(aug_joints)
if isinstance(transformation, FlipTransform):
aug_joints = transformation.recover_left_right(aug_joints)
aug_center = transformation.apply_coords(aug_center)
meta.img = im
meta.mask = mask
meta.aug_joints = point8_to_joints(aug_joints)
meta.aug_center = aug_center
back_img = meta.img
back_aug_joints = meta.aug_joints
return [back_img, back_aug_joints]
<|reserved_special_token_0|>
def create_all_mask(mask, num, stride):
"""
Helper function to create a stack of scaled down mask.
:param mask: mask image
:param num: number of layers
:param stride: parameter used to scale down the mask image because it has
    the same size as the original image. We need the size of the network output.
:return:
"""
scale_factor = 1.0 / stride
small_mask = cv2.resize(mask, (0, 0), fx=scale_factor, fy=scale_factor,
interpolation=cv2.INTER_CUBIC)
small_mask = small_mask[:, :, np.newaxis]
return np.repeat(small_mask, num, axis=2)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def read_img(components):
"""
Loads image from meta.img_path. Assigns the image to
the field img of the same meta instance.
:param components: components
:return: updated components
"""
img_buf = open(components[0], 'rb').read()
if not img_buf:
raise Exception('image not read, path=%s' % components[0])
arr = np.fromstring(img_buf, np.uint8)
img = cv2.imdecode(arr, cv2.IMREAD_COLOR)
components[1], components[2] = img.shape[:2]
components[10] = img
return components
def gen_mask(components):
"""
Generate masks based on the coco mask polygons.
:param components: components
:return: updated components
"""
masks_segments = components[7]
hh = components[1]
ww = components[2]
if masks_segments:
mask_miss = np.ones((hh, ww), dtype=np.uint8)
for seg in masks_segments:
bin_mask = maskUtils.decode(seg)
bin_mask = np.logical_not(bin_mask)
mask_miss = np.bitwise_and(mask_miss, bin_mask)
components[11] = mask_miss
return components
def augment(components, augmentors, use_o=False):
"""
Augmenting of images.
:param components: components
:return: updated components.
"""
img_path = components[0]
height = components[1]
width = components[2]
center = components[3]
bbox = components[4]
area = components[5]
num_keypoints = components[6]
masks_segments = components[7]
scale = components[8]
all_joints = components[9]
img = components[10]
mask = components[11]
aug_center = components[12]
aug_joints = components[13]
idx = components[14]
meta = Meta(img_path, height, width, center, bbox, area, scale,
num_keypoints)
meta.masks_segments = masks_segments
meta.all_joints = all_joints
meta.img = img
meta.mask = mask
meta.aug_center = aug_center
meta.aug_joints = aug_joints
aug_center = meta.center.copy()
aug_joints = joints_to_point8(meta.all_joints)
if idx % 2 == 1:
o_meta = Meta(img_path, height, width, center, bbox, area, scale,
num_keypoints)
o_meta.all_joints = all_joints
o_meta.img = img
o_meta.mask = mask
o_meta.aug_center = aug_center
o_meta.aug_joints = aug_joints
o_aug_center = o_meta.center.copy()
o_aug_joints = joints_to_point8(o_meta.all_joints)
o_trans = augmentors[4].get_transform(AugImgMetadata(img=o_meta.img,
mask=o_meta.mask, center=o_aug_center, scale=o_meta.scale))
o_img, o_mask = o_trans.apply_image(o_meta)
o_aug_joints = o_trans.apply_coords(o_aug_joints)
o_meta.aug_joints = point8_to_joints(o_aug_joints)
return [o_img, o_meta.aug_joints]
else:
for aug in augmentors:
transformation = aug.get_transform(AugImgMetadata(img=meta.img,
mask=meta.mask, center=aug_center, scale=meta.scale))
im, mask = transformation.apply_image(meta)
aug_joints = transformation.apply_coords(aug_joints)
if isinstance(transformation, FlipTransform):
aug_joints = transformation.recover_left_right(aug_joints)
aug_center = transformation.apply_coords(aug_center)
meta.img = im
meta.mask = mask
meta.aug_joints = point8_to_joints(aug_joints)
meta.aug_center = aug_center
back_img = meta.img
back_aug_joints = meta.aug_joints
return [back_img, back_aug_joints]
def apply_mask(components):
"""
Applies the mask (if exists) to the image.
:param components: components
:return: updated components
"""
img = components[10]
mask = components[11]
if mask is not None:
img[:, :, 0] = img[:, :, 0] * mask
img[:, :, 1] = img[:, :, 1] * mask
img[:, :, 2] = img[:, :, 2] * mask
img[img == 0] = 128
return components
def create_all_mask(mask, num, stride):
"""
Helper function to create a stack of scaled down mask.
:param mask: mask image
:param num: number of layers
:param stride: parameter used to scale down the mask image because it has
    the same size as the original image. We need the size of the network output.
:return:
"""
scale_factor = 1.0 / stride
small_mask = cv2.resize(mask, (0, 0), fx=scale_factor, fy=scale_factor,
interpolation=cv2.INTER_CUBIC)
small_mask = small_mask[:, :, np.newaxis]
return np.repeat(small_mask, num, axis=2)
<|reserved_special_token_1|>
import cv2
import numpy as np
from pycocotools.coco import maskUtils
from dataset.augmentors import FlipTransform, joints_to_point8, point8_to_joints, AugImgMetadata
from dataset.base_dataflow import Meta
def read_img(components):
"""
Loads image from meta.img_path. Assigns the image to
the field img of the same meta instance.
:param components: components
:return: updated components
"""
img_buf = open(components[0], 'rb').read()
if not img_buf:
raise Exception('image not read, path=%s' % components[0])
arr = np.fromstring(img_buf, np.uint8)
img = cv2.imdecode(arr, cv2.IMREAD_COLOR)
components[1], components[2] = img.shape[:2]
components[10] = img
return components
def gen_mask(components):
"""
Generate masks based on the coco mask polygons.
:param components: components
:return: updated components
"""
masks_segments = components[7]
hh = components[1]
ww = components[2]
if masks_segments:
mask_miss = np.ones((hh, ww), dtype=np.uint8)
for seg in masks_segments:
bin_mask = maskUtils.decode(seg)
bin_mask = np.logical_not(bin_mask)
mask_miss = np.bitwise_and(mask_miss, bin_mask)
components[11] = mask_miss
return components
def augment(components, augmentors, use_o=False):
"""
Augmenting of images.
:param components: components
:return: updated components.
"""
img_path = components[0]
height = components[1]
width = components[2]
center = components[3]
bbox = components[4]
area = components[5]
num_keypoints = components[6]
masks_segments = components[7]
scale = components[8]
all_joints = components[9]
img = components[10]
mask = components[11]
aug_center = components[12]
aug_joints = components[13]
idx = components[14]
meta = Meta(img_path, height, width, center, bbox, area, scale,
num_keypoints)
meta.masks_segments = masks_segments
meta.all_joints = all_joints
meta.img = img
meta.mask = mask
meta.aug_center = aug_center
meta.aug_joints = aug_joints
aug_center = meta.center.copy()
aug_joints = joints_to_point8(meta.all_joints)
if idx % 2 == 1:
o_meta = Meta(img_path, height, width, center, bbox, area, scale,
num_keypoints)
o_meta.all_joints = all_joints
o_meta.img = img
o_meta.mask = mask
o_meta.aug_center = aug_center
o_meta.aug_joints = aug_joints
o_aug_center = o_meta.center.copy()
o_aug_joints = joints_to_point8(o_meta.all_joints)
o_trans = augmentors[4].get_transform(AugImgMetadata(img=o_meta.img,
mask=o_meta.mask, center=o_aug_center, scale=o_meta.scale))
o_img, o_mask = o_trans.apply_image(o_meta)
o_aug_joints = o_trans.apply_coords(o_aug_joints)
o_meta.aug_joints = point8_to_joints(o_aug_joints)
return [o_img, o_meta.aug_joints]
else:
for aug in augmentors:
transformation = aug.get_transform(AugImgMetadata(img=meta.img,
mask=meta.mask, center=aug_center, scale=meta.scale))
im, mask = transformation.apply_image(meta)
aug_joints = transformation.apply_coords(aug_joints)
if isinstance(transformation, FlipTransform):
aug_joints = transformation.recover_left_right(aug_joints)
aug_center = transformation.apply_coords(aug_center)
meta.img = im
meta.mask = mask
meta.aug_joints = point8_to_joints(aug_joints)
meta.aug_center = aug_center
back_img = meta.img
back_aug_joints = meta.aug_joints
return [back_img, back_aug_joints]
def apply_mask(components):
"""
Applies the mask (if exists) to the image.
:param components: components
:return: updated components
"""
img = components[10]
mask = components[11]
if mask is not None:
img[:, :, 0] = img[:, :, 0] * mask
img[:, :, 1] = img[:, :, 1] * mask
img[:, :, 2] = img[:, :, 2] * mask
img[img == 0] = 128
return components
def create_all_mask(mask, num, stride):
"""
Helper function to create a stack of scaled down mask.
:param mask: mask image
:param num: number of layers
:param stride: parameter used to scale down the mask image because it has
    the same size as the original image. We need the size of the network output.
:return:
"""
scale_factor = 1.0 / stride
small_mask = cv2.resize(mask, (0, 0), fx=scale_factor, fy=scale_factor,
interpolation=cv2.INTER_CUBIC)
small_mask = small_mask[:, :, np.newaxis]
return np.repeat(small_mask, num, axis=2)
<|reserved_special_token_1|>
import cv2
import numpy as np
from pycocotools.coco import maskUtils
# from dataset.augmentors import FlipTransform, joints_to_point8, point8_to_joints, AugImgMetadata
# from dataset.base_dataflow import Meta
from dataset.augmentors import FlipTransform, joints_to_point8, point8_to_joints, AugImgMetadata
from dataset.base_dataflow import Meta
def read_img(components):
"""
Loads image from meta.img_path. Assigns the image to
the field img of the same meta instance.
:param components: components
:return: updated components
"""
img_buf = open(components[0], 'rb').read()
if not img_buf:
raise Exception('image not read, path=%s' % components[0])
arr = np.fromstring(img_buf, np.uint8)
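    # note: np.fromstring is deprecated for binary input in recent NumPy releases;
    # np.frombuffer(img_buf, np.uint8) is the drop-in modern equivalent.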
img = cv2.imdecode(arr, cv2.IMREAD_COLOR)
components[1], components[2] = img.shape[:2]
components[10] = img
return components
def gen_mask(components):
"""
Generate masks based on the coco mask polygons.
:param components: components
:return: updated components
"""
masks_segments = components[7]
hh = components[1]
ww = components[2]
if masks_segments:
mask_miss = np.ones((hh, ww), dtype=np.uint8)
for seg in masks_segments:
bin_mask = maskUtils.decode(seg)
bin_mask = np.logical_not(bin_mask)
mask_miss = np.bitwise_and(mask_miss, bin_mask)
components[11] = mask_miss
return components
# `components` is a single dataflow datapoint: its fields arrive as a plain
# list and are unpacked by index inside augment() below.
def augment(components, augmentors,use_o=False):
"""
Augmenting of images.
:param components: components
:return: updated components.
"""
img_path = components[0]
height = components[1]
width = components[2]
center = components[3]
bbox = components[4]
area = components[5]
num_keypoints = components[6]
masks_segments = components[7]
scale = components[8]
all_joints = components[9]
img = components[10]
mask = components[11]
aug_center = components[12]
aug_joints = components[13]
idx = components[14]
meta = Meta(img_path, height, width, center, bbox,
area, scale, num_keypoints)
meta.masks_segments = masks_segments
meta.all_joints = all_joints
meta.img = img
meta.mask = mask
meta.aug_center = aug_center
meta.aug_joints = aug_joints
aug_center = meta.center.copy()
aug_joints = joints_to_point8(meta.all_joints)
if idx % 2 == 1:
# print(f"ori: {idx//2}, {idx}")
o_meta= Meta(img_path, height, width, center, bbox,
area, scale, num_keypoints)
o_meta.all_joints=all_joints
o_meta.img=img
o_meta.mask=mask
o_meta.aug_center=aug_center
o_meta.aug_joints=aug_joints
o_aug_center=o_meta.center.copy()
o_aug_joints=joints_to_point8(o_meta.all_joints)
o_trans=augmentors[4].get_transform(AugImgMetadata(
img=o_meta.img,
mask = o_meta.mask,
center=o_aug_center,
scale=o_meta.scale
))
o_img,o_mask=o_trans.apply_image(o_meta)
o_aug_joints = o_trans.apply_coords(o_aug_joints)
# o_aug_center = o_trans.apply_coords(o_aug_center)
# o_meta.img=o_img
# o_meta.mask=mask
o_meta.aug_joints=point8_to_joints(o_aug_joints)
# o_meta.aug_center=o_aug_center
return [o_img,o_meta.aug_joints]
else:
for aug in augmentors:
transformation = aug.get_transform(
AugImgMetadata(img=meta.img,
mask=meta.mask,
center=aug_center,
scale=meta.scale))
im, mask = transformation.apply_image(meta)
# augment joints
aug_joints = transformation.apply_coords(aug_joints)
            # after flipping horizontally, the left-side and right-side joints are also
            # swapped, so we need to recover their original orientation.
if isinstance(transformation, FlipTransform):
aug_joints = transformation.recover_left_right(aug_joints)
# augment center position
aug_center = transformation.apply_coords(aug_center)
meta.img = im
meta.mask = mask
meta.aug_joints = point8_to_joints(aug_joints)
meta.aug_center = aug_center
back_img=meta.img
back_aug_joints = meta.aug_joints
# del meta
# return [[back_img,back_aug_joints],
# [o_meta.img,o_meta.aug_joints]]
return [back_img,back_aug_joints]
def apply_mask(components):
"""
Applies the mask (if exists) to the image.
:param components: components
:return: updated components
"""
img = components[10]
mask = components[11]
if mask is not None:
img[:, :, 0] = img[:, :, 0] * mask
img[:, :, 1] = img[:, :, 1] * mask
img[:, :, 2] = img[:, :, 2] * mask
img[img == 0] = 128
return components
def create_all_mask(mask, num, stride):
"""
Helper function to create a stack of scaled down mask.
:param mask: mask image
:param num: number of layers
:param stride: parameter used to scale down the mask image because it has
    the same size as the original image. We need the size of the network output.
:return:
"""
scale_factor = 1.0 / stride
small_mask = cv2.resize(mask, (0, 0), fx=scale_factor, fy=scale_factor, interpolation=cv2.INTER_CUBIC)
small_mask = small_mask[:, :, np.newaxis]
return np.repeat(small_mask, num, axis=2)
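# Minimal shape check for create_all_mask (the 368x368 input size, stride 8 and
# 38 channels are illustrative assumptions, not taken from the original config):
if __name__ == "__main__":
    _mask = np.ones((368, 368), dtype=np.float32)
    _stack = create_all_mask(_mask, num=38, stride=8)
    print(_stack.shape)  # -> (46, 46, 38): one scaled-down mask per output channel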
|
flexible
|
{
"blob_id": "e47223622a2718830d830dbb779800659d659ae3",
"index": 8472,
"step-1": "<mask token>\n\n\ndef augment(components, augmentors, use_o=False):\n \"\"\"\n Augmenting of images.\n\n :param components: components\n :return: updated components.\n \"\"\"\n img_path = components[0]\n height = components[1]\n width = components[2]\n center = components[3]\n bbox = components[4]\n area = components[5]\n num_keypoints = components[6]\n masks_segments = components[7]\n scale = components[8]\n all_joints = components[9]\n img = components[10]\n mask = components[11]\n aug_center = components[12]\n aug_joints = components[13]\n idx = components[14]\n meta = Meta(img_path, height, width, center, bbox, area, scale,\n num_keypoints)\n meta.masks_segments = masks_segments\n meta.all_joints = all_joints\n meta.img = img\n meta.mask = mask\n meta.aug_center = aug_center\n meta.aug_joints = aug_joints\n aug_center = meta.center.copy()\n aug_joints = joints_to_point8(meta.all_joints)\n if idx % 2 == 1:\n o_meta = Meta(img_path, height, width, center, bbox, area, scale,\n num_keypoints)\n o_meta.all_joints = all_joints\n o_meta.img = img\n o_meta.mask = mask\n o_meta.aug_center = aug_center\n o_meta.aug_joints = aug_joints\n o_aug_center = o_meta.center.copy()\n o_aug_joints = joints_to_point8(o_meta.all_joints)\n o_trans = augmentors[4].get_transform(AugImgMetadata(img=o_meta.img,\n mask=o_meta.mask, center=o_aug_center, scale=o_meta.scale))\n o_img, o_mask = o_trans.apply_image(o_meta)\n o_aug_joints = o_trans.apply_coords(o_aug_joints)\n o_meta.aug_joints = point8_to_joints(o_aug_joints)\n return [o_img, o_meta.aug_joints]\n else:\n for aug in augmentors:\n transformation = aug.get_transform(AugImgMetadata(img=meta.img,\n mask=meta.mask, center=aug_center, scale=meta.scale))\n im, mask = transformation.apply_image(meta)\n aug_joints = transformation.apply_coords(aug_joints)\n if isinstance(transformation, FlipTransform):\n aug_joints = transformation.recover_left_right(aug_joints)\n aug_center = transformation.apply_coords(aug_center)\n meta.img = im\n meta.mask = mask\n meta.aug_joints = point8_to_joints(aug_joints)\n meta.aug_center = aug_center\n back_img = meta.img\n back_aug_joints = meta.aug_joints\n return [back_img, back_aug_joints]\n\n\n<mask token>\n\n\ndef create_all_mask(mask, num, stride):\n \"\"\"\n Helper function to create a stack of scaled down mask.\n\n :param mask: mask image\n :param num: number of layers\n :param stride: parameter used to scale down the mask image because it has\n the same size as orginal image. We need the size of network output.\n :return:\n \"\"\"\n scale_factor = 1.0 / stride\n small_mask = cv2.resize(mask, (0, 0), fx=scale_factor, fy=scale_factor,\n interpolation=cv2.INTER_CUBIC)\n small_mask = small_mask[:, :, np.newaxis]\n return np.repeat(small_mask, num, axis=2)\n",
"step-2": "<mask token>\n\n\ndef read_img(components):\n \"\"\"\n Loads image from meta.img_path. Assigns the image to\n the field img of the same meta instance.\n\n :param components: components\n :return: updated components\n \"\"\"\n img_buf = open(components[0], 'rb').read()\n if not img_buf:\n raise Exception('image not read, path=%s' % components[0])\n arr = np.fromstring(img_buf, np.uint8)\n img = cv2.imdecode(arr, cv2.IMREAD_COLOR)\n components[1], components[2] = img.shape[:2]\n components[10] = img\n return components\n\n\ndef gen_mask(components):\n \"\"\"\n Generate masks based on the coco mask polygons.\n\n :param components: components\n :return: updated components\n \"\"\"\n masks_segments = components[7]\n hh = components[1]\n ww = components[2]\n if masks_segments:\n mask_miss = np.ones((hh, ww), dtype=np.uint8)\n for seg in masks_segments:\n bin_mask = maskUtils.decode(seg)\n bin_mask = np.logical_not(bin_mask)\n mask_miss = np.bitwise_and(mask_miss, bin_mask)\n components[11] = mask_miss\n return components\n\n\ndef augment(components, augmentors, use_o=False):\n \"\"\"\n Augmenting of images.\n\n :param components: components\n :return: updated components.\n \"\"\"\n img_path = components[0]\n height = components[1]\n width = components[2]\n center = components[3]\n bbox = components[4]\n area = components[5]\n num_keypoints = components[6]\n masks_segments = components[7]\n scale = components[8]\n all_joints = components[9]\n img = components[10]\n mask = components[11]\n aug_center = components[12]\n aug_joints = components[13]\n idx = components[14]\n meta = Meta(img_path, height, width, center, bbox, area, scale,\n num_keypoints)\n meta.masks_segments = masks_segments\n meta.all_joints = all_joints\n meta.img = img\n meta.mask = mask\n meta.aug_center = aug_center\n meta.aug_joints = aug_joints\n aug_center = meta.center.copy()\n aug_joints = joints_to_point8(meta.all_joints)\n if idx % 2 == 1:\n o_meta = Meta(img_path, height, width, center, bbox, area, scale,\n num_keypoints)\n o_meta.all_joints = all_joints\n o_meta.img = img\n o_meta.mask = mask\n o_meta.aug_center = aug_center\n o_meta.aug_joints = aug_joints\n o_aug_center = o_meta.center.copy()\n o_aug_joints = joints_to_point8(o_meta.all_joints)\n o_trans = augmentors[4].get_transform(AugImgMetadata(img=o_meta.img,\n mask=o_meta.mask, center=o_aug_center, scale=o_meta.scale))\n o_img, o_mask = o_trans.apply_image(o_meta)\n o_aug_joints = o_trans.apply_coords(o_aug_joints)\n o_meta.aug_joints = point8_to_joints(o_aug_joints)\n return [o_img, o_meta.aug_joints]\n else:\n for aug in augmentors:\n transformation = aug.get_transform(AugImgMetadata(img=meta.img,\n mask=meta.mask, center=aug_center, scale=meta.scale))\n im, mask = transformation.apply_image(meta)\n aug_joints = transformation.apply_coords(aug_joints)\n if isinstance(transformation, FlipTransform):\n aug_joints = transformation.recover_left_right(aug_joints)\n aug_center = transformation.apply_coords(aug_center)\n meta.img = im\n meta.mask = mask\n meta.aug_joints = point8_to_joints(aug_joints)\n meta.aug_center = aug_center\n back_img = meta.img\n back_aug_joints = meta.aug_joints\n return [back_img, back_aug_joints]\n\n\n<mask token>\n\n\ndef create_all_mask(mask, num, stride):\n \"\"\"\n Helper function to create a stack of scaled down mask.\n\n :param mask: mask image\n :param num: number of layers\n :param stride: parameter used to scale down the mask image because it has\n the same size as orginal image. 
We need the size of network output.\n :return:\n \"\"\"\n scale_factor = 1.0 / stride\n small_mask = cv2.resize(mask, (0, 0), fx=scale_factor, fy=scale_factor,\n interpolation=cv2.INTER_CUBIC)\n small_mask = small_mask[:, :, np.newaxis]\n return np.repeat(small_mask, num, axis=2)\n",
"step-3": "<mask token>\n\n\ndef read_img(components):\n \"\"\"\n Loads image from meta.img_path. Assigns the image to\n the field img of the same meta instance.\n\n :param components: components\n :return: updated components\n \"\"\"\n img_buf = open(components[0], 'rb').read()\n if not img_buf:\n raise Exception('image not read, path=%s' % components[0])\n arr = np.fromstring(img_buf, np.uint8)\n img = cv2.imdecode(arr, cv2.IMREAD_COLOR)\n components[1], components[2] = img.shape[:2]\n components[10] = img\n return components\n\n\ndef gen_mask(components):\n \"\"\"\n Generate masks based on the coco mask polygons.\n\n :param components: components\n :return: updated components\n \"\"\"\n masks_segments = components[7]\n hh = components[1]\n ww = components[2]\n if masks_segments:\n mask_miss = np.ones((hh, ww), dtype=np.uint8)\n for seg in masks_segments:\n bin_mask = maskUtils.decode(seg)\n bin_mask = np.logical_not(bin_mask)\n mask_miss = np.bitwise_and(mask_miss, bin_mask)\n components[11] = mask_miss\n return components\n\n\ndef augment(components, augmentors, use_o=False):\n \"\"\"\n Augmenting of images.\n\n :param components: components\n :return: updated components.\n \"\"\"\n img_path = components[0]\n height = components[1]\n width = components[2]\n center = components[3]\n bbox = components[4]\n area = components[5]\n num_keypoints = components[6]\n masks_segments = components[7]\n scale = components[8]\n all_joints = components[9]\n img = components[10]\n mask = components[11]\n aug_center = components[12]\n aug_joints = components[13]\n idx = components[14]\n meta = Meta(img_path, height, width, center, bbox, area, scale,\n num_keypoints)\n meta.masks_segments = masks_segments\n meta.all_joints = all_joints\n meta.img = img\n meta.mask = mask\n meta.aug_center = aug_center\n meta.aug_joints = aug_joints\n aug_center = meta.center.copy()\n aug_joints = joints_to_point8(meta.all_joints)\n if idx % 2 == 1:\n o_meta = Meta(img_path, height, width, center, bbox, area, scale,\n num_keypoints)\n o_meta.all_joints = all_joints\n o_meta.img = img\n o_meta.mask = mask\n o_meta.aug_center = aug_center\n o_meta.aug_joints = aug_joints\n o_aug_center = o_meta.center.copy()\n o_aug_joints = joints_to_point8(o_meta.all_joints)\n o_trans = augmentors[4].get_transform(AugImgMetadata(img=o_meta.img,\n mask=o_meta.mask, center=o_aug_center, scale=o_meta.scale))\n o_img, o_mask = o_trans.apply_image(o_meta)\n o_aug_joints = o_trans.apply_coords(o_aug_joints)\n o_meta.aug_joints = point8_to_joints(o_aug_joints)\n return [o_img, o_meta.aug_joints]\n else:\n for aug in augmentors:\n transformation = aug.get_transform(AugImgMetadata(img=meta.img,\n mask=meta.mask, center=aug_center, scale=meta.scale))\n im, mask = transformation.apply_image(meta)\n aug_joints = transformation.apply_coords(aug_joints)\n if isinstance(transformation, FlipTransform):\n aug_joints = transformation.recover_left_right(aug_joints)\n aug_center = transformation.apply_coords(aug_center)\n meta.img = im\n meta.mask = mask\n meta.aug_joints = point8_to_joints(aug_joints)\n meta.aug_center = aug_center\n back_img = meta.img\n back_aug_joints = meta.aug_joints\n return [back_img, back_aug_joints]\n\n\ndef apply_mask(components):\n \"\"\"\n Applies the mask (if exists) to the image.\n\n :param components: components\n :return: updated components\n \"\"\"\n img = components[10]\n mask = components[11]\n if mask is not None:\n img[:, :, 0] = img[:, :, 0] * mask\n img[:, :, 1] = img[:, :, 1] * mask\n img[:, :, 2] = img[:, :, 2] * 
mask\n img[img == 0] = 128\n return components\n\n\ndef create_all_mask(mask, num, stride):\n \"\"\"\n Helper function to create a stack of scaled down mask.\n\n :param mask: mask image\n :param num: number of layers\n :param stride: parameter used to scale down the mask image because it has\n the same size as orginal image. We need the size of network output.\n :return:\n \"\"\"\n scale_factor = 1.0 / stride\n small_mask = cv2.resize(mask, (0, 0), fx=scale_factor, fy=scale_factor,\n interpolation=cv2.INTER_CUBIC)\n small_mask = small_mask[:, :, np.newaxis]\n return np.repeat(small_mask, num, axis=2)\n",
"step-4": "import cv2\nimport numpy as np\nfrom pycocotools.coco import maskUtils\nfrom dataset.augmentors import FlipTransform, joints_to_point8, point8_to_joints, AugImgMetadata\nfrom dataset.base_dataflow import Meta\n\n\ndef read_img(components):\n \"\"\"\n Loads image from meta.img_path. Assigns the image to\n the field img of the same meta instance.\n\n :param components: components\n :return: updated components\n \"\"\"\n img_buf = open(components[0], 'rb').read()\n if not img_buf:\n raise Exception('image not read, path=%s' % components[0])\n arr = np.fromstring(img_buf, np.uint8)\n img = cv2.imdecode(arr, cv2.IMREAD_COLOR)\n components[1], components[2] = img.shape[:2]\n components[10] = img\n return components\n\n\ndef gen_mask(components):\n \"\"\"\n Generate masks based on the coco mask polygons.\n\n :param components: components\n :return: updated components\n \"\"\"\n masks_segments = components[7]\n hh = components[1]\n ww = components[2]\n if masks_segments:\n mask_miss = np.ones((hh, ww), dtype=np.uint8)\n for seg in masks_segments:\n bin_mask = maskUtils.decode(seg)\n bin_mask = np.logical_not(bin_mask)\n mask_miss = np.bitwise_and(mask_miss, bin_mask)\n components[11] = mask_miss\n return components\n\n\ndef augment(components, augmentors, use_o=False):\n \"\"\"\n Augmenting of images.\n\n :param components: components\n :return: updated components.\n \"\"\"\n img_path = components[0]\n height = components[1]\n width = components[2]\n center = components[3]\n bbox = components[4]\n area = components[5]\n num_keypoints = components[6]\n masks_segments = components[7]\n scale = components[8]\n all_joints = components[9]\n img = components[10]\n mask = components[11]\n aug_center = components[12]\n aug_joints = components[13]\n idx = components[14]\n meta = Meta(img_path, height, width, center, bbox, area, scale,\n num_keypoints)\n meta.masks_segments = masks_segments\n meta.all_joints = all_joints\n meta.img = img\n meta.mask = mask\n meta.aug_center = aug_center\n meta.aug_joints = aug_joints\n aug_center = meta.center.copy()\n aug_joints = joints_to_point8(meta.all_joints)\n if idx % 2 == 1:\n o_meta = Meta(img_path, height, width, center, bbox, area, scale,\n num_keypoints)\n o_meta.all_joints = all_joints\n o_meta.img = img\n o_meta.mask = mask\n o_meta.aug_center = aug_center\n o_meta.aug_joints = aug_joints\n o_aug_center = o_meta.center.copy()\n o_aug_joints = joints_to_point8(o_meta.all_joints)\n o_trans = augmentors[4].get_transform(AugImgMetadata(img=o_meta.img,\n mask=o_meta.mask, center=o_aug_center, scale=o_meta.scale))\n o_img, o_mask = o_trans.apply_image(o_meta)\n o_aug_joints = o_trans.apply_coords(o_aug_joints)\n o_meta.aug_joints = point8_to_joints(o_aug_joints)\n return [o_img, o_meta.aug_joints]\n else:\n for aug in augmentors:\n transformation = aug.get_transform(AugImgMetadata(img=meta.img,\n mask=meta.mask, center=aug_center, scale=meta.scale))\n im, mask = transformation.apply_image(meta)\n aug_joints = transformation.apply_coords(aug_joints)\n if isinstance(transformation, FlipTransform):\n aug_joints = transformation.recover_left_right(aug_joints)\n aug_center = transformation.apply_coords(aug_center)\n meta.img = im\n meta.mask = mask\n meta.aug_joints = point8_to_joints(aug_joints)\n meta.aug_center = aug_center\n back_img = meta.img\n back_aug_joints = meta.aug_joints\n return [back_img, back_aug_joints]\n\n\ndef apply_mask(components):\n \"\"\"\n Applies the mask (if exists) to the image.\n\n :param components: components\n :return: updated 
components\n \"\"\"\n img = components[10]\n mask = components[11]\n if mask is not None:\n img[:, :, 0] = img[:, :, 0] * mask\n img[:, :, 1] = img[:, :, 1] * mask\n img[:, :, 2] = img[:, :, 2] * mask\n img[img == 0] = 128\n return components\n\n\ndef create_all_mask(mask, num, stride):\n \"\"\"\n Helper function to create a stack of scaled down mask.\n\n :param mask: mask image\n :param num: number of layers\n :param stride: parameter used to scale down the mask image because it has\n the same size as orginal image. We need the size of network output.\n :return:\n \"\"\"\n scale_factor = 1.0 / stride\n small_mask = cv2.resize(mask, (0, 0), fx=scale_factor, fy=scale_factor,\n interpolation=cv2.INTER_CUBIC)\n small_mask = small_mask[:, :, np.newaxis]\n return np.repeat(small_mask, num, axis=2)\n",
"step-5": "import cv2\nimport numpy as np\n\nfrom pycocotools.coco import maskUtils\n\n# from dataset.augmentors import FlipTransform, joints_to_point8, point8_to_joints, AugImgMetadata\n\n# from dataset.base_dataflow import Meta\n\nfrom dataset.augmentors import FlipTransform, joints_to_point8, point8_to_joints, AugImgMetadata\n\nfrom dataset.base_dataflow import Meta\n\ndef read_img(components):\n \"\"\"\n Loads image from meta.img_path. Assigns the image to\n the field img of the same meta instance.\n\n :param components: components\n :return: updated components\n \"\"\"\n\n img_buf = open(components[0], 'rb').read()\n\n if not img_buf:\n raise Exception('image not read, path=%s' % components[0])\n\n arr = np.fromstring(img_buf, np.uint8)\n img = cv2.imdecode(arr, cv2.IMREAD_COLOR)\n components[1], components[2] = img.shape[:2]\n components[10] = img\n\n return components\n\n\ndef gen_mask(components):\n \"\"\"\n Generate masks based on the coco mask polygons.\n\n :param components: components\n :return: updated components\n \"\"\"\n masks_segments = components[7]\n hh = components[1]\n ww = components[2]\n\n if masks_segments:\n mask_miss = np.ones((hh, ww), dtype=np.uint8)\n for seg in masks_segments:\n bin_mask = maskUtils.decode(seg)\n bin_mask = np.logical_not(bin_mask)\n mask_miss = np.bitwise_and(mask_miss, bin_mask)\n\n components[11] = mask_miss\n\n return components\n\n\n# components == df\n# seems params' type is list\ndef augment(components, augmentors,use_o=False):\n \"\"\"\n Augmenting of images.\n\n :param components: components\n :return: updated components.\n \"\"\"\n \n img_path = components[0]\n height = components[1]\n width = components[2]\n center = components[3]\n bbox = components[4]\n area = components[5]\n num_keypoints = components[6]\n masks_segments = components[7]\n scale = components[8]\n all_joints = components[9]\n img = components[10]\n mask = components[11]\n aug_center = components[12]\n aug_joints = components[13]\n idx = components[14]\n\n meta = Meta(img_path, height, width, center, bbox,\n area, scale, num_keypoints)\n meta.masks_segments = masks_segments\n meta.all_joints = all_joints\n meta.img = img\n meta.mask = mask\n meta.aug_center = aug_center\n meta.aug_joints = aug_joints\n\n aug_center = meta.center.copy()\n aug_joints = joints_to_point8(meta.all_joints)\n\n if idx % 2 == 1:\n # print(f\"ori: {idx//2}, {idx}\")\n o_meta= Meta(img_path, height, width, center, bbox,\n area, scale, num_keypoints)\n o_meta.all_joints=all_joints\n o_meta.img=img\n o_meta.mask=mask\n o_meta.aug_center=aug_center\n o_meta.aug_joints=aug_joints\n \n o_aug_center=o_meta.center.copy()\n o_aug_joints=joints_to_point8(o_meta.all_joints)\n \n o_trans=augmentors[4].get_transform(AugImgMetadata(\n img=o_meta.img,\n mask = o_meta.mask,\n center=o_aug_center,\n scale=o_meta.scale\n ))\n \n o_img,o_mask=o_trans.apply_image(o_meta)\n o_aug_joints = o_trans.apply_coords(o_aug_joints)\n # o_aug_center = o_trans.apply_coords(o_aug_center)\n # o_meta.img=o_img\n # o_meta.mask=mask\n o_meta.aug_joints=point8_to_joints(o_aug_joints)\n # o_meta.aug_center=o_aug_center\n return [o_img,o_meta.aug_joints]\n \n else:\n\n for aug in augmentors:\n transformation = aug.get_transform(\n AugImgMetadata(img=meta.img,\n mask=meta.mask,\n center=aug_center,\n scale=meta.scale))\n im, mask = transformation.apply_image(meta)\n\n # augment joints\n aug_joints = transformation.apply_coords(aug_joints)\n\n # after flipping horizontaly the left side joints and right side joints are also\n # 
flipped so we need to recover their orginal orientation.\n if isinstance(transformation, FlipTransform):\n aug_joints = transformation.recover_left_right(aug_joints)\n\n # augment center position\n aug_center = transformation.apply_coords(aug_center)\n\n meta.img = im\n meta.mask = mask\n\n meta.aug_joints = point8_to_joints(aug_joints)\n meta.aug_center = aug_center\n\n back_img=meta.img\n back_aug_joints = meta.aug_joints\n # del meta\n\n # return [[back_img,back_aug_joints],\n # [o_meta.img,o_meta.aug_joints]]\n\n return [back_img,back_aug_joints]\n\n\ndef apply_mask(components):\n \"\"\"\n Applies the mask (if exists) to the image.\n\n :param components: components\n :return: updated components\n \"\"\"\n img = components[10]\n mask = components[11]\n if mask is not None:\n img[:, :, 0] = img[:, :, 0] * mask\n img[:, :, 1] = img[:, :, 1] * mask\n img[:, :, 2] = img[:, :, 2] * mask\n img[img == 0] = 128\n return components\n\n\ndef create_all_mask(mask, num, stride):\n \"\"\"\n Helper function to create a stack of scaled down mask.\n\n :param mask: mask image\n :param num: number of layers\n :param stride: parameter used to scale down the mask image because it has\n the same size as orginal image. We need the size of network output.\n :return:\n \"\"\"\n scale_factor = 1.0 / stride\n small_mask = cv2.resize(mask, (0, 0), fx=scale_factor, fy=scale_factor, interpolation=cv2.INTER_CUBIC)\n small_mask = small_mask[:, :, np.newaxis]\n return np.repeat(small_mask, num, axis=2)\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
class Vintage:
<|reserved_special_token_0|>
def __init__(self, year, month):
self.year, self.month = year, month
self.csv = LocalCSV(year, month)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Collection:
"""Methods to manipulate entire set of data releases."""
all_dates = SUPPORTED_DATES
latest_vintage = Vintage(*LATEST_DATE)
@classmethod
def save_latest(cls):
cls.latest_vintage.save()
@classmethod
def approve_latest(cls):
"""Quick check for algorithm on latest available data."""
cls.latest_vintage.validate()
@classmethod
def save_all(cls):
for year, month in cls.all_dates:
Vintage(year, month).save()
@classmethod
def approve_all(cls):
"""Checks all dates, runs for about 1-2 min of a fast computer.
May fail if dataset not complete, eg word2csv written only part
of CSV file.
"""
for year, month in cls.all_dates:
print('Checking', year, month)
vintage = Vintage(year, month)
vintage.validate()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Vintage:
<|reserved_special_token_0|>
def __init__(self, year, month):
self.year, self.month = year, month
self.csv = LocalCSV(year, month)
@property
def dfs(self):
with open_csv(self.csv.interim) as csvfile:
return get_dataframes(csvfile)
def save(self):
for freq, df in self.dfs.items():
path = self.csv.processed(freq)
df.to_csv(path)
print('Saved dataframe to', path)
return True
def validate(self):
checker = Validator(*[self.dfs[freq] for freq in FREQUENCIES])
checker.run()
print('Test values parsed OK for', self)
return True
def __repr__(self):
return 'Vintage({}, {})'.format(self.year, self.month)
class Collection:
"""Methods to manipulate entire set of data releases."""
all_dates = SUPPORTED_DATES
latest_vintage = Vintage(*LATEST_DATE)
@classmethod
def save_latest(cls):
cls.latest_vintage.save()
@classmethod
def approve_latest(cls):
"""Quick check for algorithm on latest available data."""
cls.latest_vintage.validate()
@classmethod
def save_all(cls):
for year, month in cls.all_dates:
Vintage(year, month).save()
@classmethod
def approve_all(cls):
"""Checks all dates, runs for about 1-2 min of a fast computer.
May fail if dataset not complete, eg word2csv written only part
of CSV file.
"""
for year, month in cls.all_dates:
print('Checking', year, month)
vintage = Vintage(year, month)
vintage.validate()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Vintage:
"""Represents dataset release for a given year and month."""
def __init__(self, year, month):
self.year, self.month = year, month
self.csv = LocalCSV(year, month)
@property
def dfs(self):
with open_csv(self.csv.interim) as csvfile:
return get_dataframes(csvfile)
def save(self):
for freq, df in self.dfs.items():
path = self.csv.processed(freq)
df.to_csv(path)
print('Saved dataframe to', path)
return True
def validate(self):
checker = Validator(*[self.dfs[freq] for freq in FREQUENCIES])
checker.run()
print('Test values parsed OK for', self)
return True
def __repr__(self):
return 'Vintage({}, {})'.format(self.year, self.month)
class Collection:
"""Methods to manipulate entire set of data releases."""
all_dates = SUPPORTED_DATES
latest_vintage = Vintage(*LATEST_DATE)
@classmethod
def save_latest(cls):
cls.latest_vintage.save()
@classmethod
def approve_latest(cls):
"""Quick check for algorithm on latest available data."""
cls.latest_vintage.validate()
@classmethod
def save_all(cls):
for year, month in cls.all_dates:
Vintage(year, month).save()
@classmethod
def approve_all(cls):
"""Checks all dates, runs for about 1-2 min of a fast computer.
May fail if dataset not complete, eg word2csv written only part
of CSV file.
"""
for year, month in cls.all_dates:
print('Checking', year, month)
vintage = Vintage(year, month)
vintage.validate()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_dataframes(csvfile, spec=SPEC):
"""Extract dataframes from *csvfile* using *spec* parsing instructions.
Args:
csvfile (file connection or StringIO) - CSV file for parsing
    spec (spec.Specification) - parsing instructions, defaults to spec.SPEC
Returns:
Three pandas dataframes at annual, qtr and monthly frequencies
in a dictionary.
"""
tables = [t for csv_segment, pdef in Reader(csvfile, spec).items() for
t in extract_tables(csv_segment, pdef)]
emitter = Emitter(tables)
return {freq: emitter.get_dataframe(freq) for freq in FREQUENCIES}
class Vintage:
"""Represents dataset release for a given year and month."""
def __init__(self, year, month):
self.year, self.month = year, month
self.csv = LocalCSV(year, month)
@property
def dfs(self):
with open_csv(self.csv.interim) as csvfile:
return get_dataframes(csvfile)
def save(self):
for freq, df in self.dfs.items():
path = self.csv.processed(freq)
df.to_csv(path)
print('Saved dataframe to', path)
return True
def validate(self):
checker = Validator(*[self.dfs[freq] for freq in FREQUENCIES])
checker.run()
print('Test values parsed OK for', self)
return True
def __repr__(self):
return 'Vintage({}, {})'.format(self.year, self.month)
class Collection:
"""Methods to manipulate entire set of data releases."""
all_dates = SUPPORTED_DATES
latest_vintage = Vintage(*LATEST_DATE)
@classmethod
def save_latest(cls):
cls.latest_vintage.save()
@classmethod
def approve_latest(cls):
"""Quick check for algorithm on latest available data."""
cls.latest_vintage.validate()
@classmethod
def save_all(cls):
for year, month in cls.all_dates:
Vintage(year, month).save()
@classmethod
def approve_all(cls):
"""Checks all dates, runs for about 1-2 min of a fast computer.
May fail if dataset not complete, eg word2csv written only part
of CSV file.
"""
for year, month in cls.all_dates:
print('Checking', year, month)
vintage = Vintage(year, month)
vintage.validate()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
"""Get pandas dataframes for a given data and month.
*get_dataframes(csvfile, spec=SPEC)* is a function to get dataframes
from *csvfile* connection under *spec* parsing instruction.
*Vintage* class addresses dataset by year and month:
Vintage(year, month).save()
Vintage(year, month).validate()
*Collection* manipulates all datasets, released at various dates:
Collection.save_all()
Collection.save_latest()
Collection.approve_latest()
Collection.approve_all()
"""
from config import LocalCSV, LATEST_DATE, SUPPORTED_DATES
from csv2df.specification import SPEC
from csv2df.reader import Reader, open_csv
from csv2df.parser import extract_tables
from csv2df.emitter import Emitter
from csv2df.validator import Validator
__all__ = ['get_dataframes', 'Vintage', 'Collection']
FREQUENCIES = ['a', 'q', 'm']
def get_dataframes(csvfile, spec=SPEC):
"""Extract dataframes from *csvfile* using *spec* parsing instructions.
Args:
csvfile (file connection or StringIO) - CSV file for parsing
        spec (spec.Specification) - parsing instructions, defaults to spec.SPEC
Returns:
Three pandas dataframes at annual, qtr and monthly frequencies
in a dictionary.
"""
tables = [t for csv_segment, pdef in Reader(csvfile, spec).items()
for t in extract_tables(csv_segment, pdef)]
emitter = Emitter(tables)
return {freq: emitter.get_dataframe(freq) for freq in FREQUENCIES}
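# A minimal usage sketch for get_dataframes, assuming an interim CSV produced
# earlier in the pipeline (the path below is hypothetical):
#
#     with open_csv('data/interim/2015/05/tab.csv') as csvfile:
#         dfs = get_dataframes(csvfile)
#     dfa, dfq, dfm = (dfs[freq] for freq in FREQUENCIES)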
class Vintage:
"""Represents dataset release for a given year and month."""
def __init__(self, year, month):
self.year, self.month = year, month
self.csv = LocalCSV(year, month)
@property
def dfs(self):
with open_csv(self.csv.interim) as csvfile:
return get_dataframes(csvfile)
def save(self):
for freq, df in self.dfs.items():
path = self.csv.processed(freq)
df.to_csv(path)
print("Saved dataframe to", path)
return True
def validate(self):
checker = Validator(*[self.dfs[freq] for freq in FREQUENCIES])
checker.run()
print("Test values parsed OK for", self)
return True
def __repr__(self):
return "Vintage({}, {})".format(self.year, self.month)
class Collection:
"""Methods to manipulate entire set of data releases."""
all_dates = SUPPORTED_DATES
latest_vintage = Vintage(*LATEST_DATE)
@classmethod
def save_latest(cls):
cls.latest_vintage.save()
@classmethod
def approve_latest(cls):
"""Quick check for algorithm on latest available data."""
cls.latest_vintage.validate()
@classmethod
def save_all(cls):
for year, month in cls.all_dates:
Vintage(year, month).save()
@classmethod
def approve_all(cls):
"""Checks all dates, runs for about 1-2 min of a fast computer.
May fail if dataset not complete, eg word2csv written only part
of CSV file.
"""
for year, month in cls.all_dates:
print("Checking", year, month)
vintage = Vintage(year, month)
vintage.validate()
if __name__ == "__main__":
# Collection calls
# Collection.approve_latest()
# Collection.approve_all()
# Collection.save_latest()
# Collection.save_all()
# sample Vintage call
year, month = 2015, 5
vint = Vintage(year, month)
vint.validate()
    # dfs = vint.dfs  # dict of dataframes keyed by frequency ('a', 'q', 'm')
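    # A hedged follow-up sketch: save this vintage and read one processed
    # frame back with pandas (the processed path layout comes from config.LocalCSV).
    # vint.save()
    # import pandas as pd
    # dfa = pd.read_csv(vint.csv.processed('a'), index_col=0)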
from django.contrib import admin
from .models import Game, Scrap
admin.site.register(Game)
admin.site.register(Scrap)
import os
from typing import List, Optional, Sequence, Tuple, Union
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.collections import LineCollection
from mpl_toolkits.mplot3d import art3d
import wknml
# Nodes and Parameters are the wkskel table/parameter types referenced in the
# docstrings below (import path assumed from those references).
from wkskel.types import Nodes, Parameters
class Skeleton:
    """Skeleton annotation consisting of one or more trees (nodes and edges)."""
    # Reconstructed defaults: only the fallback tree color is required below.
    DEFAULTS = {'tree': {'color': (0.0, 0.0, 0.0, 1.0)}}
def __init__(self, nml_path: str=None, parameters: Parameters=None,
strict=True):
""" The Skeleton constructor expects either a path to a nml file or a Parameters object as input arguments
Args:
nml_path: Path to nml file. If constructed via an nml file, the skeleton object is populated with all the
trees and additional properties specified in the .nml file
parameters (optional): Parameters (wkskel.types.Parameters) specifying the most rudimentary properties
of the skeleton.
strict (optional): Controls assertions ensuring that resulting skeleton objects are compatible with
webKnossos. Default: True
Examples:
Using nml_path:
nml_path = '/path/to/example.nml'
skel = Skeleton(nml_path)
Using parameters:
parameters = Skeleton.define_parameters(name="2017-01-12_FD0156-2", scale=(11.24, 11.24, 32))
skel = Skeleton(parameters=parameters)
"""
        assert (nml_path is not None) ^ (parameters is not None
            ), 'To construct a skeleton object, either a path to a nml file or the skeleton parameters need to be passed'
self.nodes = list()
self.edges = list()
self.names = list()
self.colors = list()
self.tree_ids = list()
self.group_ids = list()
self.groups = list()
self.branchpoints = list()
self.parameters = Parameters()
self.nml_path = str()
self.strict = strict
self.defaults = self.DEFAULTS
if nml_path is not None:
assert os.path.exists(nml_path), 'not a valid path: {}'.format(
nml_path)
try:
with open(nml_path, 'rb') as f:
nml = wknml.parse_nml(f)
except IOError:
print('not a valid nml file: {}'.format(nml_path))
self._nml_to_skeleton(nml)
else:
assert type(parameters
) is Parameters, 'provided parameters must be of type wkskel.types.Parameters'
self._parameters_to_skeleton(parameters)
def add_tree(self, nodes: Nodes=Nodes(), edges: Union[List[Tuple[int,
int]], np.ndarray]=None, tree_id: int=None, name: str='', group_id:
int=None, color: Tuple[float, float, float, float]=None):
""" Appends new tree to skeleton.
Args:
nodes (optional): Nodes representing tree to be added
edges (optional): Edges representing tree to be added
tree_id (optional): Tree id to be used for new tree. Default: Highest current tree id + 1
name (optional): Name to be used for new tree. Default: Empty str
group_id (optional): Group id to be used for new tree. If passed group id does not exist, it is created.
Default: None
color (optional): Color to be used for new tree specified as (r, g, b, alpha). Default: (0, 0, 0, 1)
"""
if edges is None:
edges = np.empty((0, 2), dtype=np.uint32)
elif type(edges) is list:
edges = np.asarray(edges)
if self.strict & (len(nodes) > 1):
assert Skeleton._num_conn_comp(Skeleton._get_graph(nodes, edges)
) == 1, 'Added tree consists of more than one connected component'
if tree_id is None:
tree_id = self.max_tree_id() + 1
if (group_id is not None) & (group_id not in self.groups_ids()):
self.add_group(id=group_id)
if color is None:
color = self.defaults['tree']['color']
self.nodes.append(nodes)
self.edges.append(edges)
self.tree_ids.append(tree_id)
self.group_ids.append(group_id)
self.names.append(name)
self.colors.append(color)
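    # Usage sketch for add_tree (values are hypothetical): build a skeleton
    # from parameters only, then append a two-node tree. On an empty skeleton
    # define_nodes() assigns ids starting at 1.
    #
    #   skel = Skeleton(parameters=Skeleton.define_parameters(
    #       name='example', scale=(11.24, 11.24, 32)))
    #   nodes = skel.define_nodes(position_x=[10, 20], position_y=[10, 20],
    #                             position_z=[5, 5])
    #   skel.add_tree(nodes=nodes, edges=[(1, 2)])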
def add_tree_from_skel(self, skel: 'Skeleton', tree_idx: int, group_id:
int=None, name: str=None):
""" Appends a specific tree contained in a different skeleton object to the skeleton.
Args:
skel: Source skeleton object (different from the one calling this method) to be added
tree_idx: Source tree index of tree to be added
group_id (optional): Target group id to which the added tree should be assigned. Default: None
name (optional): Target name for the added tree
"""
if group_id not in self.groups_ids():
self.add_group(id=group_id)
if name is None:
name = skel.names[tree_idx]
skel._reset_node_ids(self.max_node_id() + 1)
skel._reset_tree_ids(self.max_tree_id() + 1)
self.nodes = self.nodes + [skel.nodes[tree_idx]]
self.edges = self.edges + [skel.edges[tree_idx]]
self.tree_ids = self.tree_ids + [skel.tree_ids[tree_idx]]
self.group_ids = self.group_ids + [group_id]
self.names = self.names + [name]
self.colors = self.colors + [skel.colors[tree_idx]]
return self
def add_trees_from_skel(self, skel: 'Skeleton'):
""" Appends all trees contained in a different skeleton object to the skeleton.
This method attempts to preserve the relative group structure found in the skeleton object to be added
Args:
skel: Source skeleton object (different from the one calling this method) to be added
"""
skel._reset_node_ids(self.max_node_id() + 1)
skel._reset_tree_ids(self.max_tree_id() + 1)
max_group_id = self.max_group_id()
if max_group_id is not None:
skel._reset_group_ids(max_group_id + 1)
self.nodes = self.nodes + skel.nodes
self.edges = self.edges + skel.edges
self.tree_ids = self.tree_ids + skel.tree_ids
self.group_ids = self.group_ids + skel.group_ids
self.groups = self.groups + skel.groups
self.names = self.names + skel.names
self.colors = self.colors + skel.colors
return self
def add_nodes_as_trees(self, nodes: Nodes, tree_ids: List[int]=None,
group_ids: List[int]=None, names: List[str]=None, colors: List[
Tuple[float, float, float, float]]=None):
""" Appends each of the specified nodes as separate trees to the skeleton (1 node each).
Args:
nodes: Nodes representing the trees to be added
tree_ids (optional): Tree ids to be assigned to the newly added trees. Default: Global max + [1, n]
group_ids (optional): Group ids to be assigned to the newly added trees. Default: None
names (optional): Names to be assigned to the newly added trees.
colors (optional): Colors to be used for the new trees specified as (r, g, b, alpha). Default: (0, 0, 0, 1)
"""
if tree_ids is None:
tree_id_start = self.max_tree_id() + 1
tree_id_end = tree_id_start + len(nodes)
tree_ids = list(range(tree_id_start, tree_id_end))
if group_ids is None:
group_ids = [None for x in range(len(nodes))]
if names is None:
names = ['' for x in range(len(nodes))]
if colors is None:
colors = [(0.0, 0.0, 0.0, 1.0) for x in range(len(nodes))]
for node_idx, _ in nodes.iterrows():
self.add_tree(nodes=nodes[node_idx:node_idx + 1], tree_id=
tree_ids[node_idx], group_id=group_ids[node_idx], name=
names[node_idx], color=colors[node_idx])
def delete_tree(self, idx: int=None, id: int=None):
""" Deletes tree with specified idx or id.
Args:
idx: Linear index of tree to be deleted
id: Id of tree to be deleted
"""
if id is not None:
idx = self.tree_ids.index(id)
self.nodes.pop(idx)
self.edges.pop(idx)
self.names.pop(idx)
self.colors.pop(idx)
self.tree_ids.pop(idx)
self.group_ids.pop(idx)
def add_group(self, parent_id: int=None, id: int=None, name: str=None):
""" Adds a new group to skeleton object.
Args:
parent_id: Parent group id to which new group is added as a child. Default: None (root group)
id: Id of new group to be added. Default: Current max group id + 1
name: Name of new group to be added. Default: 'Group {}'.format(id)
Returns:
id: Id of added group
name: Name of added group
"""
if parent_id is not None:
assert parent_id in self.group_ids, 'Parent id does not exist'
if id is None:
id = int(np.nanmax(np.asarray(self.group_ids, dtype=np.float)) + 1)
else:
assert id not in self.groups_ids(), 'Id already exists'
if name is None:
name = 'Group {}'.format(id)
new_group = wknml.Group(id, name, [])
if parent_id is None:
self.groups.append(new_group)
else:
self.groups = Skeleton._group_append(self.groups, parent_id,
new_group)
return id, name
def delete_group(self, id, target_id):
pass
def define_nodes(self, position_x: List[int], position_y: List[int],
position_z: List[int], id: List[int]=None, radius: Optional[List[
int]]=None, rotation_x: Optional[List[float]]=None, rotation_y:
Optional[List[float]]=None, rotation_z: Optional[List[float]]=None,
inVP: Optional[List[int]]=None, inMag: Optional[List[int]]=None,
bitDepth: Optional[List[int]]=None, interpolation: Optional[List[
bool]]=None, time: Optional[List[int]]=None, comment: Optional[List
[int]]=None) ->Nodes:
""" Generates new nodes table from data.
Args:
position_x: Node position x
position_y: Node position y
position_z: Node position z
id (optional): (Globally unique) Node id. Default: New unique ids are generated
radius (optional): Node radius
rotation_x (optional): Node rotation x
rotation_y (optional): Node rotation y
rotation_z (optional): Node rotation z
inVP (optional): Viewport index in which node was placed
inMag (optional): (De-)Magnification factor in which node was placed
bitDepth (optional): Bit (Color) Depth in which node was placed
interpolation (optional): Interpolation state in which node was placed
time (optional): Time stamp at which node was placed
comment (optional): Comment associated with node
Returns:
nodes: Nodes object
"""
if id is None:
id_max = self.max_node_id()
id = list(range(id_max + 1, id_max + len(position_x) + 1))
nodes = Nodes.from_list(id, position_x, position_y, position_z,
radius, rotation_x, rotation_y, rotation_z, inVP, inMag,
bitDepth, interpolation, time, comment)
return nodes
def define_nodes_from_positions(self, positions: np.ndarray) ->Nodes:
""" Generates new nodes table from positions only (node ids are generated automatically).
Args:
positions (N x 3): Numpy array holding the (x,y,z) positions to be returned as nodes in a Nodes table
Returns:
nodes: Nodes object
"""
id_max = self.max_node_id()
id = np.array(range(id_max + 1, id_max + positions.shape[0] + 1)
).reshape(-1, 1)
nodes = Nodes.from_numpy(np.append(id, positions, axis=1))
return nodes
def get_distances_to_node(self, positions: Union[Sequence[Tuple[int,
int, int]], np.ndarray], node_id: int=None, tree_idx: int=None,
node_idx: int=None, unit: str='um') ->List[np.ndarray]:
""" Get the (euclidean) distances from the specified node to the provided (x,y,z) positions
Args:
positions (N x 3): Target (x,y,z) positions to which the distances should be computed
node_id: Node id of the node for which the distances should be computed
tree_idx: Tree idx of the node for which the distances should be computed
node_idx: Node idx of the node for which the distances should be computed
unit (optional): Unit flag specifying in which unit the distances should be returned.
Options: 'vx' (voxels), 'nm' (nanometer), 'um' (micrometer). Default: 'um' (micrometer)
Returns:
distances: Array holding distances
"""
assert (node_id is not None) ^ (tree_idx is not None) & (node_idx
is not None
), 'Either provide node_id or both tree_idx and node_idx'
if type(positions) is not np.ndarray:
positions = np.array(positions)
if node_id is not None:
node_idx, tree_idx = self.node_id_to_idx(node_id)
unit_factor = self._get_unit_factor(unit)
distances = Skeleton.get_distance(positions, np.array(self.nodes[
tree_idx].position.values[node_idx]), unit_factor)
return distances
def get_distance_to_nodes(self, position: Union[Tuple[int, int, int],
np.ndarray], tree_idx: int, unit: str='um') ->List[np.ndarray]:
""" Get the (euclidean) distances from the nodes of the specified tree to the provided (x,y,z) position
Args:
position (1 x 3): Target (x,y,z) position to which the node distances should be computed
tree_idx: Tree idx for which node distances should be computed
unit (optional): Unit flag specifying in which unit the distances should be returned.
Options: 'vx' (voxels), 'nm' (nanometer), 'um' (micrometer). Default: 'um' (micrometer)
Returns:
distances: Array holding distances
"""
if type(position) is not np.ndarray:
position = np.array(position)
unit_factor = self._get_unit_factor(unit)
distances = Skeleton.get_distance(np.array(self.nodes[tree_idx].
position.values), position, unit_factor)
return distances
def get_graph(self, tree_idx):
""" Returns the networkx graph representation of a tree.
Args:
tree_idx: Linear index of the tree to be returned as graph object
Returns:
graph: Graph object
"""
nodes = self.nodes[tree_idx]
edges = self.edges[tree_idx]
graph = Skeleton._get_graph(nodes, edges)
return graph
def get_shortest_path(self, node_id_start: int, node_id_end: int) ->List[
int]:
""" Returns the shortest path between two nodes of a tree.
Args:
node_id_start: Node id of start node
node_id_end: Node id of end node
Returns:
shortest_path: Node indices comprising the shortest path
"""
_, tree_idx_start = self.node_id_to_idx(node_id_start)
_, tree_idx_end = self.node_id_to_idx(node_id_end)
assert tree_idx_start == tree_idx_end, 'Provided node ids need to be part of the same tree'
graph = self.get_graph(tree_idx_start)
shortest_path = nx.shortest_path(graph, node_id_start, node_id_end)
return shortest_path
def plot(self, tree_inds: Union[int, List[int]]=None, view: str=None,
colors: Union[Tuple[float, float, float, float], List[Tuple[float,
float, float, float]], str]=None, unit: str='um', show: bool=True,
ax: plt.axes=None):
""" Generates a (3D) line plot of the trees contained in the skeleton object.
Args:
tree_inds (optional): Tree indices to be plotted.
Default: All trees are plotted
view (optional): Plot as 2D projection on orthonormal plane.
Options: 'xy', 'xz', 'yz'
Default: Plot as 3D projection
colors (optional): Colors in which trees should be plotted. If only one RGBA tuple is specified, it is
broadcasted over all trees. Alternatively, a list providing RGBA tuples for each tree can be passed.
Lastly, the name of a mnatplotlib colormap (https://matplotlib.org/tutorials/colors/colormaps.html) can
be passed as a str.
Default: Skeleton colors (self.colors) are used
unit (optional): Specifies in which unit the plot should be generated.
Options: 'vx' (voxels), 'nm' (nanometer), 'um' (micrometer).
Default: 'um' (micrometer)
show (optional): Displays the plot in an interactive window. For repeatedly plotting on the same axes, set
to False. Default: True
ax: Axes to be plotted on.
Returns:
ax: Axes which was plotted on
"""
if tree_inds is None:
tree_inds = list(range(len(self.nodes)))
elif tree_inds is int:
tree_inds = [tree_inds]
if colors is None:
colors = self.colors
elif type(colors) is str:
cmap = cm.get_cmap(colors)
colors = [cmap(x) for x in np.linspace(0, 1, self.num_trees())]
elif type(colors[0]) is not Sequence:
colors = [colors] * self.num_trees()
unit_factor = self._get_unit_factor(unit)
allowed_views = ['xy', 'xz', 'yz']
if view is not None:
assert view in allowed_views, 'The passed view argument: {} is not among the allowed views: {}'.format(
view, allowed_views)
if ax is None:
fig = plt.figure()
if view is None:
ax = fig.add_subplot(111, projection='3d')
else:
ax = fig.add_subplot(111, projection='rectilinear')
elif view is None:
assert ax.name == '3d', 'To generate a 3D skeleton plot, the projection type of the passed axes must be 3D'
else:
assert ax.name != '3d', 'To generate a 2D skeleton plot, the projection type of the passed axes must be rectilinear'
lims_min = []
lims_max = []
for tree_idx in tree_inds:
edges = self.edges[tree_idx].copy()
nodes = self.nodes[tree_idx].copy()
if len(nodes) > 0:
nodes['position'] = nodes['position'].multiply(unit_factor)
if view == 'xy':
nodes = nodes.drop([('position', 'z')], axis=1)
elif view == 'xz':
nodes = nodes.drop([('position', 'y')], axis=1)
elif view == 'yz':
nodes = nodes.drop([('position', 'x')], axis=1)
lims_min.append(np.min(nodes['position'].values, axis=0))
lims_max.append(np.max(nodes['position'].values, axis=0))
segments = []
for edge in edges:
n0 = nodes['position'][nodes.id == edge[0]].values[0]
n1 = nodes['position'][nodes.id == edge[1]].values[0]
segment = [[c for c in n0], [c for c in n1]]
segments.append(segment)
if view is None:
line_collection = art3d.Line3DCollection(segments=
segments, colors=colors[tree_idx])
ax.add_collection3d(line_collection)
else:
line_collection = LineCollection(segments=segments,
colors=colors[tree_idx])
ax.add_collection(line_collection)
lim_min = np.min(np.array(lims_min), axis=0)
lim_max = np.max(np.array(lims_max), axis=0)
ax.set_xlim(lim_min[0], lim_max[0])
ax.set_ylim(lim_min[1], lim_max[1])
if view is None:
ax.set_zlim(lim_min[2], lim_max[2])
else:
ax.set_aspect('equal')
if show:
plt.show()
return ax
def write_nml(self, nml_write_path):
""" Writes the present state of the skeleton object to a .nml file.
Args:
nml_write_path: Path to which .nml file should be written
"""
if self.num_trees() == 0:
self.add_tree()
nml = self._skeleton_to_nml()
with open(nml_write_path, 'wb') as f:
wknml.write_nml(f, nml)
def node_id_to_idx(self, node_id: int) ->(int, int):
""" Returns the linear tree and node indices for the provided node id."""
node_idx = None
for tree_idx, nodes in enumerate(self.nodes):
index_list = nodes[nodes['id'] == node_id].index.tolist()
if index_list:
node_idx = index_list[0]
break
assert node_idx is not None, 'node id {} does not exist'.format(node_id
)
return node_idx, tree_idx
def node_idx_to_id(self, node_idx: int, tree_idx: int) ->int:
""" Returns the node id for the provided tree and node idx."""
node_id = self.nodes[tree_idx].loc[node_idx, 'id'].values[0]
return node_id
def min_group_id(self) ->int:
""" Returns lowest group id. If no groups are defined, return None"""
group_ids = np.asarray(self.group_ids, dtype=np.float)
if np.all(np.isnan(group_ids)):
group_id = None
else:
group_id = int(np.nanmin(group_ids))
return group_id
def max_group_id(self) ->int:
""" Returns highest group id. If no groups are defined, return None"""
group_ids = np.asarray(self.group_ids, dtype=np.float)
if np.all(np.isnan(group_ids)):
group_id = None
else:
group_id = int(np.nanmax(group_ids))
return group_id
def min_node_id(self) ->int:
""" Returns lowest global node id."""
if len(self.nodes) > 0:
min_node_id = min([(min(nodes.id) if len(nodes) > 0 else 0) for
nodes in self.nodes])
else:
min_node_id = 0
return min_node_id
def max_node_id(self) ->int:
""" Returns highest global node id."""
if len(self.nodes) > 0:
max_node_id = max([(max(nodes.id) if len(nodes) > 0 else 0) for
nodes in self.nodes])
else:
max_node_id = 0
return max_node_id
def min_tree_id(self) ->int:
""" Returns lowest global tree id."""
return min(self.tree_ids) if len(self.tree_ids) > 0 else 0
def max_tree_id(self) ->int:
""" Returns highest global tree id."""
return max(self.tree_ids) if len(self.tree_ids) > 0 else 0
def num_trees(self) ->int:
"""Returns number of trees contained in skeleton object."""
return len(self.nodes)
def groups_ids(self) ->List[int]:
""" Returns all ids defined in groups tree"""
_, groups_ids = Skeleton._group_get_ids(self.groups)
return groups_ids
def _get_unit_factor(self, unit: str) ->np.ndarray:
""" Returns factor for unit conversion
Args:
unit: Unit for which to return the conversion factor.
Options: 'vx' (voxels), 'nm' (nanometer), 'um' (micrometer)
Returns:
unit_factor (shape=(3,)): Unit conversion factors
"""
unit_factors = {'vx': np.array((1, 1, 1)), 'nm': np.array(self.
parameters.scale), 'um': np.array(self.parameters.scale) / 1000}
assert unit in unit_factors.keys(), 'Invalid unit'
unit_factor = unit_factors[unit]
return unit_factor
def _reset_node_ids(self, start_id: int):
""" Resets node ids of skeleton to begin with start value.
Args:
start_id: Start value to which the lowest node id should be set.
"""
add_id = start_id - self.min_node_id()
for tree_idx, _ in enumerate(self.nodes):
self.nodes[tree_idx].nodes['id'] += add_id
self.edges[tree_idx] += add_id
def _reset_tree_ids(self, start_id: int):
""" Resets tree ids of skeleton to begin with start value.
Args:
start_id: Start value to which the lowest tree id should be set.
"""
add_id = start_id - self.min_tree_id()
self.tree_ids = [(tree_id + add_id) for tree_id in self.tree_ids]
def _reset_group_ids(self, start_id: int):
""" Resets group ids of skeleton to begin with start value.
Args:
start_id: Start value to which the lowest group id should be set.
"""
min_group_id = self.min_group_id()
if min_group_id is not None:
add_id = start_id - min_group_id
self.group_ids = [(i + add_id if i is not None else i) for i in
self.group_ids]
self.groups = [Skeleton._group_modify_id(group, id_modifier=lambda
x: x + add_id) for group in self.groups]
<|reserved_special_token_0|>
def _nml_to_skeleton(self, nml):
""" Converts wknml to skeleton data structures."""
self.groups = nml.groups
self.branchpoints = nml.branchpoints
self.parameters = Parameters(**nml.parameters._asdict())
for tree in nml.trees:
self.add_tree(nodes=Skeleton._nml_nodes_to_nodes(nml_nodes=tree
.nodes, nml_comments=nml.comments), edges=np.array([(edge.
source, edge.target) for edge in tree.edges]), group_id=
tree.groupId, name=tree.name, color=tree.color)
def _skeleton_to_nml(self):
""" Converts skeleton to wknml data structures."""
trees = []
for tree_idx, tree_id in enumerate(self.tree_ids):
nml_nodes = Skeleton._nodes_to_nml_nodes(self.nodes[tree_idx])
nml_edges = Skeleton._edges_to_nml_edges(self.edges[tree_idx])
tree = wknml.Tree(id=tree_id, color=self.colors[tree_idx], name
=self.names[tree_idx], groupId=self.group_ids[tree_idx],
nodes=nml_nodes, edges=nml_edges)
trees.append(tree)
nml = wknml.NML(parameters=wknml.NMLParameters(**self.parameters.
_asdict()), trees=trees, branchpoints=self.branchpoints,
comments=self._skeleton_to_nml_comments(), groups=self.groups)
return nml
def _skeleton_to_nml_comments(self):
""" Converts skeleton to wknml comments."""
nml_comments = []
for nodes in self.nodes:
comment_nodes = nodes[nodes['comment'].notnull()]
for _, row in comment_nodes.iterrows():
nml_comment = wknml.Comment(node=row['id'].values[0],
content=row['comment'].values[0])
nml_comments.append(nml_comment)
return nml_comments
@staticmethod
def define_parameters(name: str, scale: Tuple[float, float, float],
offset: Tuple[float, float, float]=(0, 0, 0), time: int=0,
editPosition: Tuple[float, float, float]=(1.0, 1.0, 1.0),
editRotation: Tuple[float, float, float]=(0.0, 0.0, 0.0), zoomLevel:
float=1.0, taskBoundingBox: Tuple[int, int, int, int, int, int]=
None, userBoundingBox: Tuple[int, int, int, int, int, int]=None
) ->Parameters:
parameters = Parameters(name=name, scale=scale, offset=offset, time
=time, editPosition=editPosition, editRotation=editRotation,
zoomLevel=zoomLevel, taskBoundingBox=taskBoundingBox,
userBoundingBox=userBoundingBox)
return parameters
@staticmethod
def get_distance(positions: np.ndarray, position: np.ndarray,
unit_factor: np.ndarray=None):
""" Get the (euclidean) distances between positions and a target position
Args:
positions (N x 3): Array holding (multiple) x, y, z positions
position (1 x 3): Array holding x, y, z position to which the distances should be computed
unit_factors (1 x 3 Array, optional): Conversion factors with which distances are multiplied. Default (1,1,1)
Returns:
distances: Arrays holding distances
"""
if unit_factor is None:
unit_factor = np.array([1, 1, 1])
distances = np.sqrt(np.sum(((positions - position) * unit_factor.
reshape(1, 3)) ** 2, axis=1))
return distances
@staticmethod
def _nml_nodes_to_nodes(nml_nodes, nml_comments):
""" Converts wknml nodes (list of named tuples) to skeleton nodes (DataFrame subclass)."""
data = [(node.id, node.position[0], node.position[1], node.position
[2], node.radius, node.rotation[0], node.rotation[1], node.
rotation[2], node.inVp, node.inMag, node.bitDepth, node.
interpolation, node.time, np.nan) for node in nml_nodes]
nodes = Nodes(data=data)
comment_node_ids = [comment.node for comment in nml_comments]
comment_strings = [comment.content for comment in nml_comments]
nodes_ids_comments = nodes.id[nodes.id.isin(comment_node_ids)]
for id in nodes_ids_comments:
id_comment = comment_strings[comment_node_ids.index(id)]
nodes.loc[nodes.id == id, ('comment', '')] = id_comment
return nodes
@staticmethod
def _nodes_to_nml_nodes(nodes):
""" Converts skeleton nodes (DataFrame subclass) to wknml nodes (list of named tuples)."""
nml_nodes = []
for idx, row in nodes.iterrows():
nml_node = wknml.Node(id=int(row.id), position=tuple(row.
position.values), radius=float(row.radius), rotation=tuple(
row.rotation.values), inVp=int(row.inVp), inMag=int(row.
inMag), bitDepth=int(row.bitDepth), interpolation=bool(row.
interpolation.values), time=int(row.time))
nml_nodes.append(nml_node)
return nml_nodes
<|reserved_special_token_0|>
@staticmethod
def _group_append(groups, id, new_group):
""" Appends new group as a child of existing group with specified id. Currently only works up to depth=3."""
path_inds = []
_, _, idx = Skeleton._group_parent(groups, id)
while id is not None:
path_inds.append(idx)
id, idx, _ = Skeleton._group_parent(groups, id)
path_inds = list(reversed(path_inds))
if len(path_inds) == 1:
groups[path_inds[0]]._replace(children=new_group)
elif len(path_inds) == 2:
groups[path_inds[0]].children[path_inds[1]]._replace(children=
new_group)
elif len(path_inds) == 3:
groups[path_inds[0]].children[path_inds[1]].children[path_inds[2]
]._replace(children=new_group)
return groups
@staticmethod
def _group_parent(groups, id, parent_id=None, parent_idx=None,
child_idx=None):
""" Returns the id of the parent group for a (child) group with specified id."""
for group in groups:
if id in [x.id for x in group.children]:
parent_id = group.id
parent_idx = groups.index(group)
child_idx = [x.id for x in group.children].index(id)
else:
parent_id, parent_idx, child_idx = Skeleton._group_parent(group
.children, id, parent_id, parent_idx, child_idx)
return parent_id, parent_idx, child_idx
@staticmethod
def _group_modify_id(group, id_modifier):
""" Modifies group ids with the passed id_modifier (e.g. lambda) function."""
group = group._replace(id=id_modifier(group.id))
group = group._replace(children=list(map(lambda g: Skeleton.
_group_modify_id(g, id_modifier), group.children)))
return group
@staticmethod
def _group_get_ids(groups, ids=[]):
for group in groups:
ids.append(group.id)
Skeleton._group_get_ids(group.children, ids)
return groups, ids
@staticmethod
def _get_graph(nodes: Nodes, edges: np.ndarray):
""" Returns the networkx graph representation of provided nodes and edges."""
graph = nx.Graph()
graph.add_nodes_from(nodes['id'])
attrs = nodes.set_index('id').to_dict('index')
nx.set_node_attributes(graph, attrs)
graph.add_edges_from(edges)
return graph
@staticmethod
def _num_conn_comp(graph):
""" Returns number of connected components for graph"""
return nx.number_connected_components(graph)
<|reserved_special_token_1|>
import os
import numpy as np
import networkx as nx
from matplotlib import colors, cm
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from mpl_toolkits.mplot3d import Axes3D, art3d
from typing import Union, Sequence, List, Tuple, Optional
import wknml
from wkskel.types import Nodes, Parameters
class Skeleton:
"""The Skeleton class facilitates scientific analysis and manipulation of webKnossos tracings.
It is designed as a high-level interface for working with nml files generated e.g with webKnossos. It makes use of
the (low-level) `wknml` package mostly as an I/O interface to nml files.
Class Attributes:
DEFAULTS (dict): Global default parameters which are passed to each skeleton object instance
"""
DEFAULTS = {
'node': {
'radius': 100,
'comment': ''
},
'tree': {
'color': (0.0, 0.0, 0.0, 1.0)
}
}
def __init__(self, nml_path: str = None, parameters: Parameters = None, strict = True):
""" The Skeleton constructor expects either a path to a nml file or a Parameters object as input arguments
Args:
nml_path: Path to nml file. If constructed via an nml file, the skeleton object is populated with all the
trees and additional properties specified in the .nml file
parameters (optional): Parameters (wkskel.types.Parameters) specifying the most rudimentary properties
of the skeleton.
strict (optional): Controls assertions ensuring that resulting skeleton objects are compatible with
webKnossos. Default: True
Examples:
Using nml_path:
nml_path = '/path/to/example.nml'
skel = Skeleton(nml_path)
Using parameters:
parameters = Skeleton.define_parameters(name="2017-01-12_FD0156-2", scale=(11.24, 11.24, 32))
skel = Skeleton(parameters=parameters)
"""
        assert (nml_path is not None) ^ (parameters is not None), \
            'To construct a skeleton object, either a path to a nml file or the skeleton parameters need to be passed'
self.nodes = list()
self.edges = list()
self.names = list()
self.colors = list()
self.tree_ids = list()
self.group_ids = list()
self.groups = list()
self.branchpoints = list()
self.parameters = Parameters()
self.nml_path = str()
self.strict = strict
self.defaults = self.DEFAULTS
# Construct from nml file
if nml_path is not None:
assert os.path.exists(nml_path), \
'not a valid path: {}'.format(nml_path)
try:
with open(nml_path, "rb") as f:
nml = wknml.parse_nml(f)
            except IOError:
                print('not a valid nml file: {}'.format(nml_path))
                raise
self._nml_to_skeleton(nml)
# Construct from parameters
else:
assert type(parameters) is Parameters, \
'provided parameters must be of type wkskel.types.Parameters'
self._parameters_to_skeleton(parameters)
def add_tree(self,
nodes: Nodes = Nodes(),
edges: Union[List[Tuple[int, int]], np.ndarray] = None,
tree_id: int = None,
name: str = '',
group_id: int = None,
color: Tuple[float, float, float, float] = None):
""" Appends new tree to skeleton.
Args:
nodes (optional): Nodes representing tree to be added
edges (optional): Edges representing tree to be added
tree_id (optional): Tree id to be used for new tree. Default: Highest current tree id + 1
name (optional): Name to be used for new tree. Default: Empty str
group_id (optional): Group id to be used for new tree. If passed group id does not exist, it is created.
Default: None
color (optional): Color to be used for new tree specified as (r, g, b, alpha). Default: (0, 0, 0, 1)
"""
if edges is None:
edges = np.empty((0, 2), dtype=np.uint32)
elif type(edges) is list:
edges = np.asarray(edges)
if self.strict & (len(nodes) > 1):
assert Skeleton._num_conn_comp(Skeleton._get_graph(nodes, edges)) == 1, \
'Added tree consists of more than one connected component'
if tree_id is None:
tree_id = self.max_tree_id() + 1
if (group_id is not None) & (group_id not in self.groups_ids()):
self.add_group(id=group_id)
if color is None:
color = self.defaults['tree']['color']
self.nodes.append(nodes)
self.edges.append(edges)
self.tree_ids.append(tree_id)
self.group_ids.append(group_id)
self.names.append(name)
self.colors.append(color)
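    # Illustrative sketch (comment added for clarity, not part of the original source):
    # a minimal add_tree call, assuming `skel` is a freshly constructed Skeleton with no
    # nodes yet. Positions, ids and the single edge are made-up values; edges must
    # reference ids contained in the passed Nodes table.
    #
    #   nodes = skel.define_nodes(position_x=[0, 10], position_y=[0, 0], position_z=[0, 0],
    #                             id=[1, 2])
    #   skel.add_tree(nodes=nodes, edges=[(1, 2)], name='example tree')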
def add_tree_from_skel(self,
skel: 'Skeleton',
tree_idx: int,
group_id: int = None,
name: str = None):
""" Appends a specific tree contained in a different skeleton object to the skeleton.
Args:
skel: Source skeleton object (different from the one calling this method) to be added
tree_idx: Source tree index of tree to be added
group_id (optional): Target group id to which the added tree should be assigned. Default: None
name (optional): Target name for the added tree
"""
if group_id not in self.groups_ids():
self.add_group(id=group_id)
if name is None:
name = skel.names[tree_idx]
skel._reset_node_ids(self.max_node_id() + 1)
skel._reset_tree_ids(self.max_tree_id() + 1)
self.nodes = self.nodes + [skel.nodes[tree_idx]]
self.edges = self.edges + [skel.edges[tree_idx]]
self.tree_ids = self.tree_ids + [skel.tree_ids[tree_idx]]
self.group_ids = self.group_ids + [group_id]
self.names = self.names + [name]
self.colors = self.colors + [skel.colors[tree_idx]]
return self
def add_trees_from_skel(self, skel: 'Skeleton'):
""" Appends all trees contained in a different skeleton object to the skeleton.
This method attempts to preserve the relative group structure found in the skeleton object to be added
Args:
skel: Source skeleton object (different from the one calling this method) to be added
"""
skel._reset_node_ids(self.max_node_id() + 1)
skel._reset_tree_ids(self.max_tree_id() + 1)
max_group_id = self.max_group_id()
if max_group_id is not None:
skel._reset_group_ids(max_group_id + 1)
self.nodes = self.nodes + skel.nodes
self.edges = self.edges + skel.edges
self.tree_ids = self.tree_ids + skel.tree_ids
self.group_ids = self.group_ids + skel.group_ids
self.groups = self.groups + skel.groups
self.names = self.names + skel.names
self.colors = self.colors + skel.colors
return self
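    # Sketch (comment added for illustration; file names are placeholders): merging two
    # tracings. Node, tree and group ids of the source skeleton are re-based onto this
    # skeleton's current maxima, so all ids stay unique after the merge.
    #
    #   merged = Skeleton('tracing_a.nml').add_trees_from_skel(Skeleton('tracing_b.nml'))
    #   merged.write_nml('merged.nml')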
def add_nodes_as_trees(self,
nodes: Nodes,
tree_ids: List[int] = None,
group_ids: List[int] = None,
names: List[str] = None,
colors: List[Tuple[float, float, float, float]] = None):
""" Appends each of the specified nodes as separate trees to the skeleton (1 node each).
Args:
nodes: Nodes representing the trees to be added
tree_ids (optional): Tree ids to be assigned to the newly added trees. Default: Global max + [1, n]
group_ids (optional): Group ids to be assigned to the newly added trees. Default: None
names (optional): Names to be assigned to the newly added trees.
colors (optional): Colors to be used for the new trees specified as (r, g, b, alpha). Default: (0, 0, 0, 1)
"""
if tree_ids is None:
tree_id_start = self.max_tree_id() + 1
tree_id_end = tree_id_start + len(nodes)
tree_ids = list(range(tree_id_start, tree_id_end))
if group_ids is None:
group_ids = [None for x in range(len(nodes))]
if names is None:
names = ['' for x in range(len(nodes))]
if colors is None:
colors = [(0.0, 0.0, 0.0, 1.0) for x in range(len(nodes))]
for node_idx, _ in nodes.iterrows():
self.add_tree(
nodes=nodes[node_idx:node_idx+1],
tree_id=tree_ids[node_idx],
group_id=group_ids[node_idx],
name=names[node_idx],
color=colors[node_idx]
)
def delete_tree(self, idx: int = None, id: int = None):
""" Deletes tree with specified idx or id.
Args:
idx: Linear index of tree to be deleted
id: Id of tree to be deleted
"""
if id is not None:
idx = self.tree_ids.index(id)
self.nodes.pop(idx)
self.edges.pop(idx)
self.names.pop(idx)
self.colors.pop(idx)
self.tree_ids.pop(idx)
self.group_ids.pop(idx)
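    # Sketch (added comment): a tree can be removed either by its linear index or by its id.
    #
    #   skel.delete_tree(idx=0)                 # first tree held by the skeleton
    #   skel.delete_tree(id=skel.tree_ids[0])   # tree with a specific tree id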
def add_group(self, parent_id: int = None, id: int = None, name: str = None):
""" Adds a new group to skeleton object.
Args:
parent_id: Parent group id to which new group is added as a child. Default: None (root group)
id: Id of new group to be added. Default: Current max group id + 1
name: Name of new group to be added. Default: 'Group {}'.format(id)
Returns:
id: Id of added group
name: Name of added group
"""
if parent_id is not None:
assert (parent_id in self.group_ids), ('Parent id does not exist')
if id is None:
            id = int(np.nanmax(np.asarray(self.group_ids, dtype=float)) + 1)
else:
assert (id not in self.groups_ids()), ('Id already exists')
if name is None:
name = 'Group {}'.format(id)
new_group = wknml.Group(id, name, [])
if parent_id is None:
self.groups.append(new_group)
else:
self.groups = Skeleton._group_append(self.groups, parent_id, new_group)
return id, name
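    # Example sketch: creating a group with an explicit id and assigning a new tree to it.
    # `dendrite_nodes` is an assumed Nodes table defined elsewhere.
    #
    #     group_id, group_name = skel.add_group(id=1, name='dendrites')
    #     skel.add_tree(nodes=dendrite_nodes, group_id=group_id)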
def delete_group(self, id, target_id):
# TODO
pass
def define_nodes(self,
position_x: List[int],
position_y: List[int],
position_z: List[int],
id: List[int] = None,
radius: Optional[List[int]] = None,
rotation_x: Optional[List[float]] = None,
rotation_y: Optional[List[float]] = None,
rotation_z: Optional[List[float]] = None,
inVP: Optional[List[int]] = None,
inMag: Optional[List[int]] = None,
bitDepth: Optional[List[int]] = None,
interpolation: Optional[List[bool]] = None,
time: Optional[List[int]] = None,
                     comment: Optional[List[str]] = None) -> Nodes:
""" Generates new nodes table from data.
Args:
position_x: Node position x
position_y: Node position y
position_z: Node position z
id (optional): (Globally unique) Node id. Default: New unique ids are generated
radius (optional): Node radius
rotation_x (optional): Node rotation x
rotation_y (optional): Node rotation y
rotation_z (optional): Node rotation z
inVP (optional): Viewport index in which node was placed
inMag (optional): (De-)Magnification factor in which node was placed
bitDepth (optional): Bit (Color) Depth in which node was placed
interpolation (optional): Interpolation state in which node was placed
time (optional): Time stamp at which node was placed
comment (optional): Comment associated with node
Returns:
nodes: Nodes object
"""
if id is None:
id_max = self.max_node_id()
id = list(range(id_max+1, id_max+len(position_x)+1))
nodes = Nodes.from_list(id, position_x, position_y, position_z, radius, rotation_x, rotation_y,
rotation_z, inVP, inMag, bitDepth, interpolation, time, comment)
return nodes
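    # Example sketch: building a small nodes table with explicit comments; all other
    # per-node attributes fall back to their defaults.
    #
    #     new_nodes = skel.define_nodes(
    #         position_x=[100, 110], position_y=[200, 210], position_z=[50, 50],
    #         comment=['soma', None])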
def define_nodes_from_positions(self, positions: np.ndarray) -> Nodes:
""" Generates new nodes table from positions only (node ids are generated automatically).
Args:
positions (N x 3): Numpy array holding the (x,y,z) positions to be returned as nodes in a Nodes table
Returns:
nodes: Nodes object
"""
id_max = self.max_node_id()
id = np.array(range(id_max + 1, id_max + positions.shape[0] + 1)).reshape(-1, 1)
nodes = Nodes.from_numpy(np.append(id, positions, axis=1))
return nodes
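    # Example sketch: turning raw voxel coordinates into a Nodes table with fresh,
    # globally unique node ids, ready to be passed to add_tree() or add_nodes_as_trees().
    #
    #     positions = np.array([[100, 200, 50], [110, 210, 50], [120, 220, 50]])
    #     new_nodes = skel.define_nodes_from_positions(positions)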
def get_distances_to_node(self,
positions: Union[Sequence[Tuple[int, int, int]], np.ndarray],
node_id: int = None,
tree_idx: int = None,
node_idx: int = None,
unit: str = 'um') -> List[np.ndarray]:
""" Get the (euclidean) distances from the specified node to the provided (x,y,z) positions
Args:
positions (N x 3): Target (x,y,z) positions to which the distances should be computed
node_id: Node id of the node for which the distances should be computed
tree_idx: Tree idx of the node for which the distances should be computed
node_idx: Node idx of the node for which the distances should be computed
unit (optional): Unit flag specifying in which unit the distances should be returned.
Options: 'vx' (voxels), 'nm' (nanometer), 'um' (micrometer). Default: 'um' (micrometer)
Returns:
distances: Array holding distances
"""
assert (node_id is not None) ^ ((tree_idx is not None) & (node_idx is not None)), \
'Either provide node_id or both tree_idx and node_idx'
if type(positions) is not np.ndarray:
positions = np.array(positions)
if node_id is not None:
node_idx, tree_idx = self.node_id_to_idx(node_id)
unit_factor = self._get_unit_factor(unit)
distances = Skeleton.get_distance(positions, np.array(self.nodes[tree_idx].position.values[node_idx]), unit_factor)
return distances
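    # Example sketch (hypothetical node id): distances in micrometers from node 42 to
    # two candidate positions given in voxel coordinates.
    #
    #     dists = skel.get_distances_to_node([(100, 100, 20), (400, 300, 60)], node_id=42)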
def get_distance_to_nodes(self,
position: Union[Tuple[int, int, int], np.ndarray],
tree_idx: int,
unit: str = 'um') -> List[np.ndarray]:
""" Get the (euclidean) distances from the nodes of the specified tree to the provided (x,y,z) position
Args:
position (1 x 3): Target (x,y,z) position to which the node distances should be computed
tree_idx: Tree idx for which node distances should be computed
unit (optional): Unit flag specifying in which unit the distances should be returned.
Options: 'vx' (voxels), 'nm' (nanometer), 'um' (micrometer). Default: 'um' (micrometer)
Returns:
distances: Array holding distances
"""
if type(position) is not np.ndarray:
position = np.array(position)
unit_factor = self._get_unit_factor(unit)
distances = Skeleton.get_distance(np.array(self.nodes[tree_idx].position.values), position, unit_factor)
return distances
def get_graph(self, tree_idx):
""" Returns the networkx graph representation of a tree.
Args:
tree_idx: Linear index of the tree to be returned as graph object
Returns:
graph: Graph object
"""
nodes = self.nodes[tree_idx]
edges = self.edges[tree_idx]
graph = Skeleton._get_graph(nodes, edges)
return graph
def get_shortest_path(self, node_id_start: int, node_id_end: int) -> List[int]:
""" Returns the shortest path between two nodes of a tree.
Args:
node_id_start: Node id of start node
node_id_end: Node id of end node
Returns:
            shortest_path: Node ids comprising the shortest path
"""
_, tree_idx_start = self.node_id_to_idx(node_id_start)
_, tree_idx_end = self.node_id_to_idx(node_id_end)
assert tree_idx_start == tree_idx_end, 'Provided node ids need to be part of the same tree'
graph = self.get_graph(tree_idx_start)
shortest_path = nx.shortest_path(graph, node_id_start, node_id_end)
return shortest_path
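    # Example sketch (hypothetical node ids belonging to the same tree):
    #
    #     path_node_ids = skel.get_shortest_path(node_id_start=10, node_id_end=55)
    #     print('path contains {} nodes'.format(len(path_node_ids)))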
def plot(self,
tree_inds: Union[int, List[int]] = None,
view: str = None,
colors: Union[Tuple[float, float, float, float], List[Tuple[float, float, float, float]], str] = None,
unit: str = 'um',
show: bool = True,
ax: plt.axes = None):
""" Generates a (3D) line plot of the trees contained in the skeleton object.
Args:
tree_inds (optional): Tree indices to be plotted.
Default: All trees are plotted
view (optional): Plot as 2D projection on orthonormal plane.
Options: 'xy', 'xz', 'yz'
Default: Plot as 3D projection
colors (optional): Colors in which trees should be plotted. If only one RGBA tuple is specified, it is
broadcasted over all trees. Alternatively, a list providing RGBA tuples for each tree can be passed.
                Lastly, the name of a matplotlib colormap (https://matplotlib.org/tutorials/colors/colormaps.html) can
be passed as a str.
Default: Skeleton colors (self.colors) are used
unit (optional): Specifies in which unit the plot should be generated.
Options: 'vx' (voxels), 'nm' (nanometer), 'um' (micrometer).
Default: 'um' (micrometer)
show (optional): Displays the plot in an interactive window. For repeatedly plotting on the same axes, set
to False. Default: True
ax: Axes to be plotted on.
Returns:
ax: Axes which was plotted on
"""
if tree_inds is None:
tree_inds = list(range(len(self.nodes)))
        elif isinstance(tree_inds, int):
tree_inds = [tree_inds]
if colors is None:
colors = self.colors
elif type(colors) is str:
cmap = cm.get_cmap(colors)
colors = [cmap(x) for x in np.linspace(0, 1, self.num_trees())]
        elif not isinstance(colors[0], Sequence):
colors = [colors] * self.num_trees()
unit_factor = self._get_unit_factor(unit)
allowed_views = ['xy', 'xz', 'yz']
if view is not None:
assert (view in allowed_views), \
'The passed view argument: {} is not among the allowed views: {}'.format(view, allowed_views)
if ax is None:
fig = plt.figure()
if view is None:
ax = fig.add_subplot(111, projection='3d')
else:
ax = fig.add_subplot(111, projection='rectilinear')
else:
if view is None:
assert (ax.name == '3d'), \
'To generate a 3D skeleton plot, the projection type of the passed axes must be 3D'
else:
assert (ax.name != '3d'), \
'To generate a 2D skeleton plot, the projection type of the passed axes must be rectilinear'
lims_min = []
lims_max = []
for tree_idx in tree_inds:
edges = self.edges[tree_idx].copy()
nodes = self.nodes[tree_idx].copy()
if len(nodes) > 0:
nodes['position'] = nodes['position'].multiply(unit_factor)
if view == 'xy':
nodes = nodes.drop([('position', 'z')], axis=1)
elif view == 'xz':
nodes = nodes.drop([('position', 'y')], axis=1)
elif view == 'yz':
nodes = nodes.drop([('position', 'x')], axis=1)
lims_min.append(np.min(nodes['position'].values, axis=0))
lims_max.append(np.max(nodes['position'].values, axis=0))
segments = []
for edge in edges:
n0 = nodes['position'][nodes.id == edge[0]].values[0]
n1 = nodes['position'][nodes.id == edge[1]].values[0]
segment = [[c for c in n0], [c for c in n1]]
segments.append(segment)
if view is None:
line_collection = art3d.Line3DCollection(segments=segments, colors=colors[tree_idx])
ax.add_collection3d(line_collection)
else:
line_collection = LineCollection(segments=segments, colors=colors[tree_idx])
ax.add_collection(line_collection)
lim_min = np.min(np.array(lims_min), axis=0)
lim_max = np.max(np.array(lims_max), axis=0)
ax.set_xlim(lim_min[0], lim_max[0])
ax.set_ylim(lim_min[1], lim_max[1])
if view is None:
ax.set_zlim(lim_min[2], lim_max[2])
else:
ax.set_aspect('equal')
if show:
plt.show()
return ax
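    # Example sketch: plotting all trees as an xy projection with a matplotlib colormap,
    # then reusing the returned axes for a second skeleton (variable names are assumed).
    #
    #     ax = skel_a.plot(view='xy', colors='viridis', show=False)
    #     skel_b.plot(view='xy', colors=(1.0, 0.0, 0.0, 1.0), ax=ax)
    #     plt.show()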
def write_nml(self, nml_write_path):
""" Writes the present state of the skeleton object to a .nml file.
Args:
nml_write_path: Path to which .nml file should be written
"""
# If the object does not have any trees, construct an empty tree before writing to enable webKnossos import
if self.num_trees() == 0:
self.add_tree()
nml = self._skeleton_to_nml()
with open(nml_write_path, "wb") as f:
wknml.write_nml(f, nml)
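    # Example sketch (hypothetical paths): round-tripping an annotation file.
    #
    #     skel = Skeleton('input.nml')
    #     skel.write_nml('output.nml')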
# Convenience Methods
def node_id_to_idx(self, node_id: int) -> (int, int):
""" Returns the linear tree and node indices for the provided node id."""
node_idx = None
for tree_idx, nodes in enumerate(self.nodes):
index_list = nodes[nodes['id'] == node_id].index.tolist()
if index_list:
node_idx = index_list[0]
break
assert (node_idx is not None), \
'node id {} does not exist'.format(node_id)
return node_idx, tree_idx
def node_idx_to_id(self, node_idx: int, tree_idx: int) -> int:
""" Returns the node id for the provided tree and node idx."""
node_id = self.nodes[tree_idx].loc[node_idx, 'id'].values[0]
return node_id
def min_group_id(self) -> int:
""" Returns lowest group id. If no groups are defined, return None"""
        group_ids = np.asarray(self.group_ids, dtype=float)
if np.all(np.isnan(group_ids)):
group_id = None
else:
group_id = int(np.nanmin(group_ids))
return group_id
def max_group_id(self) -> int:
""" Returns highest group id. If no groups are defined, return None"""
        group_ids = np.asarray(self.group_ids, dtype=float)
if np.all(np.isnan(group_ids)):
group_id = None
else:
group_id = int(np.nanmax(group_ids))
return group_id
def min_node_id(self) -> int:
""" Returns lowest global node id."""
if len(self.nodes) > 0:
min_node_id = min([min(nodes.id) if len(nodes) > 0 else 0 for nodes in self.nodes])
else:
min_node_id = 0
return min_node_id
def max_node_id(self) -> int:
""" Returns highest global node id."""
if len(self.nodes) > 0:
max_node_id = max([max(nodes.id) if len(nodes) > 0 else 0 for nodes in self.nodes])
else:
max_node_id = 0
return max_node_id
def min_tree_id(self) -> int:
""" Returns lowest global tree id."""
return min(self.tree_ids) if len(self.tree_ids)>0 else 0
def max_tree_id(self) -> int:
""" Returns highest global tree id."""
return max(self.tree_ids) if len(self.tree_ids)>0 else 0
def num_trees(self) -> int:
"""Returns number of trees contained in skeleton object."""
return len(self.nodes)
def groups_ids(self) -> List[int]:
""" Returns all ids defined in groups tree"""
_, groups_ids = Skeleton._group_get_ids(self.groups)
return groups_ids
# Private Methods
def _get_unit_factor(self, unit: str) -> np.ndarray:
""" Returns factor for unit conversion
Args:
unit: Unit for which to return the conversion factor.
Options: 'vx' (voxels), 'nm' (nanometer), 'um' (micrometer)
Returns:
unit_factor (shape=(3,)): Unit conversion factors
"""
unit_factors = {
'vx': np.array((1, 1, 1)),
'nm': np.array(self.parameters.scale),
'um': np.array(self.parameters.scale)/1000
}
assert unit in unit_factors.keys(), 'Invalid unit'
unit_factor = unit_factors[unit]
return unit_factor
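    # Worked example: for an assumed dataset scale of (11.24, 11.24, 32) nm per voxel,
    # the factors are 'vx' -> (1, 1, 1), 'nm' -> (11.24, 11.24, 32) and
    # 'um' -> (0.01124, 0.01124, 0.032), i.e. a displacement of 100 voxels along z
    # corresponds to 100 * 0.032 = 3.2 um.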
def _reset_node_ids(self, start_id: int):
""" Resets node ids of skeleton to begin with start value.
Args:
start_id: Start value to which the lowest node id should be set.
"""
add_id = start_id - self.min_node_id()
for tree_idx, _ in enumerate(self.nodes):
            self.nodes[tree_idx]['id'] += add_id
self.edges[tree_idx] += add_id
def _reset_tree_ids(self, start_id: int):
""" Resets tree ids of skeleton to begin with start value.
Args:
start_id: Start value to which the lowest tree id should be set.
"""
add_id = start_id - self.min_tree_id()
self.tree_ids = [tree_id + add_id for tree_id in self.tree_ids]
def _reset_group_ids(self, start_id: int):
""" Resets group ids of skeleton to begin with start value.
Args:
start_id: Start value to which the lowest group id should be set.
"""
min_group_id = self.min_group_id()
if min_group_id is not None:
add_id = start_id - min_group_id
self.group_ids = [i + add_id if i is not None else i for i in self.group_ids]
self.groups = [Skeleton._group_modify_id(group, id_modifier=lambda x: x + add_id) for group in self.groups]
def _parameters_to_skeleton(self, parameters):
""" Generates bare skeleton object from parameters."""
self.parameters = parameters
def _nml_to_skeleton(self, nml):
""" Converts wknml to skeleton data structures."""
self.groups = nml.groups
self.branchpoints = nml.branchpoints
self.parameters = Parameters(**nml.parameters._asdict())
for tree in nml.trees:
self.add_tree(
nodes=Skeleton._nml_nodes_to_nodes(nml_nodes=tree.nodes, nml_comments=nml.comments),
edges=np.array([(edge.source, edge.target) for edge in tree.edges]),
group_id=tree.groupId,
name=tree.name,
color=tree.color
)
def _skeleton_to_nml(self):
""" Converts skeleton to wknml data structures."""
trees = []
for tree_idx, tree_id in enumerate(self.tree_ids):
nml_nodes = Skeleton._nodes_to_nml_nodes(self.nodes[tree_idx])
nml_edges = Skeleton._edges_to_nml_edges(self.edges[tree_idx])
tree = wknml.Tree(
id=tree_id,
color=self.colors[tree_idx],
name=self.names[tree_idx],
groupId=self.group_ids[tree_idx],
nodes=nml_nodes,
edges=nml_edges
)
trees.append(tree)
nml = wknml.NML(
parameters=wknml.NMLParameters(**self.parameters._asdict()),
trees=trees,
branchpoints=self.branchpoints,
comments=self._skeleton_to_nml_comments(),
groups=self.groups
)
return nml
def _skeleton_to_nml_comments(self):
""" Converts skeleton to wknml comments."""
nml_comments = []
for nodes in self.nodes:
comment_nodes = nodes[nodes['comment'].notnull()]
for _, row in comment_nodes.iterrows():
nml_comment = wknml.Comment(
node=row['id'].values[0],
content=row['comment'].values[0]
)
nml_comments.append(nml_comment)
return nml_comments
# Static Methods
@staticmethod
def define_parameters(
name: str,
scale: Tuple[float, float, float],
offset: Tuple[float, float, float] = (0, 0, 0),
time: int = 0,
editPosition: Tuple[float, float, float] = (1.0, 1.0, 1.0),
editRotation: Tuple[float, float, float] = (0.0, 0.0, 0.0),
zoomLevel: float = 1.0,
taskBoundingBox: Tuple[int, int, int, int, int, int] = None,
userBoundingBox: Tuple[int, int, int, int, int, int] = None) -> Parameters:
parameters = Parameters(
name=name,
scale=scale,
offset=offset,
time=time,
editPosition=editPosition,
editRotation=editRotation,
zoomLevel=zoomLevel,
taskBoundingBox=taskBoundingBox,
userBoundingBox=userBoundingBox
)
return parameters
@staticmethod
def get_distance(positions: np.ndarray, position: np.ndarray, unit_factor: np.ndarray = None):
""" Get the (euclidean) distances between positions and a target position
Args:
positions (N x 3): Array holding (multiple) x, y, z positions
position (1 x 3): Array holding x, y, z position to which the distances should be computed
            unit_factor (1 x 3 array, optional): Conversion factors with which distances are multiplied. Default: (1, 1, 1)
        Returns:
            distances: Array holding distances
"""
if unit_factor is None:
unit_factor = np.array([1, 1, 1])
distances = np.sqrt(np.sum(((positions - position) * unit_factor.reshape(1, 3)) ** 2, axis=1))
return distances
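    # Worked example: positions (0, 0, 0) and (3, 4, 0) with the default unit_factor
    # (1, 1, 1) give sqrt(3**2 + 4**2) = 5; with an anisotropic factor such as (1, 1, 2)
    # the z component of every offset is doubled before the norm is taken.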
# Static Private Methods
@staticmethod
def _nml_nodes_to_nodes(nml_nodes, nml_comments):
""" Converts wknml nodes (list of named tuples) to skeleton nodes (DataFrame subclass)."""
data = [(node.id, node.position[0], node.position[1], node.position[2], node.radius, node.rotation[0],
node.rotation[1], node.rotation[2], node.inVp, node.inMag, node.bitDepth, node.interpolation,
node.time, np.nan) for node in nml_nodes]
nodes = Nodes(data=data)
# Add comments to nodes table
comment_node_ids = [comment.node for comment in nml_comments]
comment_strings = [comment.content for comment in nml_comments]
nodes_ids_comments = nodes.id[nodes.id.isin(comment_node_ids)]
for id in nodes_ids_comments:
id_comment = comment_strings[comment_node_ids.index(id)]
nodes.loc[nodes.id == id, ('comment', '')] = id_comment
return nodes
@staticmethod
def _nodes_to_nml_nodes(nodes):
""" Converts skeleton nodes (DataFrame subclass) to wknml nodes (list of named tuples)."""
nml_nodes = []
for idx, row in nodes.iterrows():
nml_node = wknml.Node(
id=int(row.id),
position=tuple(row.position.values),
radius=float(row.radius),
rotation=tuple(row.rotation.values),
inVp=int(row.inVp),
inMag=int(row.inMag),
bitDepth=int(row.bitDepth),
interpolation=bool(row.interpolation.values),
time=int(row.time)
)
nml_nodes.append(nml_node)
return nml_nodes
@staticmethod
def _edges_to_nml_edges(edges):
""" Converts skeleton edges (numpy array) to wknml edges (list of named tuples)."""
nml_edges = []
for idx in range(edges.shape[0]):
nml_edge = wknml.Edge(
source=int(edges[idx, 0]),
target=int(edges[idx, 1]),
)
nml_edges.append(nml_edge)
return nml_edges
@staticmethod
def _group_append(groups, id, new_group):
""" Appends new group as a child of existing group with specified id. Currently only works up to depth=3."""
        path_inds = []
        cur_id = id
        while cur_id is not None:
            parent_id, _, child_idx = Skeleton._group_parent(groups, cur_id)
            if parent_id is None:
                # cur_id refers to a root-level group: locate its index in the top-level list
                child_idx = [g.id for g in groups].index(cur_id)
            path_inds.append(child_idx)
            cur_id = parent_id
        path_inds = list(reversed(path_inds))
        if len(path_inds) == 1:
            parent = groups[path_inds[0]]
            groups[path_inds[0]] = parent._replace(children=parent.children + [new_group])
        elif len(path_inds) == 2:
            parent = groups[path_inds[0]].children[path_inds[1]]
            groups[path_inds[0]].children[path_inds[1]] = parent._replace(children=parent.children + [new_group])
        elif len(path_inds) == 3:
            parent = groups[path_inds[0]].children[path_inds[1]].children[path_inds[2]]
            groups[path_inds[0]].children[path_inds[1]].children[path_inds[2]] = parent._replace(children=parent.children + [new_group])
return groups
@staticmethod
def _group_parent(groups, id, parent_id=None, parent_idx=None, child_idx=None):
""" Returns the id of the parent group for a (child) group with specified id."""
for group in groups:
if id in [x.id for x in group.children]:
parent_id = group.id
parent_idx = groups.index(group)
child_idx = [x.id for x in group.children].index(id)
else:
parent_id, parent_idx, child_idx = Skeleton._group_parent(group.children, id, parent_id, parent_idx, child_idx)
return parent_id, parent_idx, child_idx
@staticmethod
def _group_modify_id(group, id_modifier):
""" Modifies group ids with the passed id_modifier (e.g. lambda) function."""
group = group._replace(id=id_modifier(group.id))
group = group._replace(children=list(map(lambda g: Skeleton._group_modify_id(g, id_modifier), group.children)))
return group
@staticmethod
    def _group_get_ids(groups, ids=None):
        """ Recursively collects the ids of all groups contained in the given groups tree."""
        if ids is None:
            ids = []
        for group in groups:
            ids.append(group.id)
            Skeleton._group_get_ids(group.children, ids)
        return groups, ids
@staticmethod
def _get_graph(nodes: Nodes, edges: np.ndarray):
""" Returns the networkx graph representation of provided nodes and edges."""
graph = nx.Graph()
graph.add_nodes_from(nodes['id'])
attrs = nodes.set_index('id').to_dict('index')
nx.set_node_attributes(graph, attrs)
graph.add_edges_from(edges)
return graph
@staticmethod
def _num_conn_comp(graph):
""" Returns number of connected components for graph"""
return nx.number_connected_components(graph)
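

# ---------------------------------------------------------------------------
# Minimal end-to-end sketch (illustrative only; the .nml paths below are
# hypothetical placeholders rather than files shipped with this module).
# It merges two annotation files and writes the combined skeleton to disk.
if __name__ == '__main__':
    skel_main = Skeleton('tracing_a.nml')        # hypothetical input file
    skel_other = Skeleton('tracing_b.nml')       # hypothetical input file
    skel_main.add_trees_from_skel(skel_other)    # ids are shifted to remain unique
    print('merged skeleton contains {} trees'.format(skel_main.num_trees()))
    skel_main.write_nml('tracing_merged.nml')    # hypothetical output path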
|
flexible
|
{
"blob_id": "365d031a31f3596df6fb71e620c293382d6ead1f",
"index": 2635,
"step-1": "<mask token>\n\n\nclass Skeleton:\n <mask token>\n <mask token>\n\n def __init__(self, nml_path: str=None, parameters: Parameters=None,\n strict=True):\n \"\"\" The Skeleton constructor expects either a path to a nml file or a Parameters object as input arguments\n\n Args:\n nml_path: Path to nml file. If constructed via an nml file, the skeleton object is populated with all the\n trees and additional properties specified in the .nml file\n parameters (optional): Parameters (wkskel.types.Parameters) specifying the most rudimentary properties\n of the skeleton.\n strict (optional): Controls assertions ensuring that resulting skeleton objects are compatible with\n webKnossos. Default: True\n\n Examples:\n Using nml_path:\n nml_path = '/path/to/example.nml'\n skel = Skeleton(nml_path)\n\n Using parameters:\n parameters = Skeleton.define_parameters(name=\"2017-01-12_FD0156-2\", scale=(11.24, 11.24, 32))\n skel = Skeleton(parameters=parameters)\n \"\"\"\n assert (nml_path is not None) ^ (parameters is not None\n ), 'To construct a skeleton object, either a path to a nml file or the skeleton parameters need to passed'\n self.nodes = list()\n self.edges = list()\n self.names = list()\n self.colors = list()\n self.tree_ids = list()\n self.group_ids = list()\n self.groups = list()\n self.branchpoints = list()\n self.parameters = Parameters()\n self.nml_path = str()\n self.strict = strict\n self.defaults = self.DEFAULTS\n if nml_path is not None:\n assert os.path.exists(nml_path), 'not a valid path: {}'.format(\n nml_path)\n try:\n with open(nml_path, 'rb') as f:\n nml = wknml.parse_nml(f)\n except IOError:\n print('not a valid nml file: {}'.format(nml_path))\n self._nml_to_skeleton(nml)\n else:\n assert type(parameters\n ) is Parameters, 'provided parameters must be of type wkskel.types.Parameters'\n self._parameters_to_skeleton(parameters)\n <mask token>\n <mask token>\n\n def add_trees_from_skel(self, skel: 'Skeleton'):\n \"\"\" Appends all trees contained in a different skeleton object to the skeleton.\n\n This method attempts to preserve the relative group structure found in the skeleton object to be added\n\n Args:\n skel: Source skeleton object (different from the one calling this method) to be added\n \"\"\"\n skel._reset_node_ids(self.max_node_id() + 1)\n skel._reset_tree_ids(self.max_tree_id() + 1)\n max_group_id = self.max_group_id()\n if max_group_id is not None:\n skel._reset_group_ids(max_group_id + 1)\n self.nodes = self.nodes + skel.nodes\n self.edges = self.edges + skel.edges\n self.tree_ids = self.tree_ids + skel.tree_ids\n self.group_ids = self.group_ids + skel.group_ids\n self.groups = self.groups + skel.groups\n self.names = self.names + skel.names\n self.colors = self.colors + skel.colors\n return self\n\n def add_nodes_as_trees(self, nodes: Nodes, tree_ids: List[int]=None,\n group_ids: List[int]=None, names: List[str]=None, colors: List[\n Tuple[float, float, float, float]]=None):\n \"\"\" Appends each of the specified nodes as separate trees to the skeleton (1 node each).\n\n Args:\n nodes: Nodes representing the trees to be added\n tree_ids (optional): Tree ids to be assigned to the newly added trees. Default: Global max + [1, n]\n group_ids (optional): Group ids to be assigned to the newly added trees. Default: None\n names (optional): Names to be assigned to the newly added trees.\n colors (optional): Colors to be used for the new trees specified as (r, g, b, alpha). 
Default: (0, 0, 0, 1)\n \"\"\"\n if tree_ids is None:\n tree_id_start = self.max_tree_id() + 1\n tree_id_end = tree_id_start + len(nodes)\n tree_ids = list(range(tree_id_start, tree_id_end))\n if group_ids is None:\n group_ids = [None for x in range(len(nodes))]\n if names is None:\n names = ['' for x in range(len(nodes))]\n if colors is None:\n colors = [(0.0, 0.0, 0.0, 1.0) for x in range(len(nodes))]\n for node_idx, _ in nodes.iterrows():\n self.add_tree(nodes=nodes[node_idx:node_idx + 1], tree_id=\n tree_ids[node_idx], group_id=group_ids[node_idx], name=\n names[node_idx], color=colors[node_idx])\n <mask token>\n\n def add_group(self, parent_id: int=None, id: int=None, name: str=None):\n \"\"\" Adds a new group to skeleton object.\n\n Args:\n parent_id: Parent group id to which new group is added as a child. Default: None (root group)\n id: Id of new group to be added. Default: Current max group id + 1\n name: Name of new group to be added. Default: 'Group {}'.format(id)\n\n Returns:\n id: Id of added group\n name: Name of added group\n\n \"\"\"\n if parent_id is not None:\n assert parent_id in self.group_ids, 'Parent id does not exist'\n if id is None:\n id = int(np.nanmax(np.asarray(self.group_ids, dtype=np.float)) + 1)\n else:\n assert id not in self.groups_ids(), 'Id already exists'\n if name is None:\n name = 'Group {}'.format(id)\n new_group = wknml.Group(id, name, [])\n if parent_id is None:\n self.groups.append(new_group)\n else:\n self.groups = Skeleton._group_append(self.groups, parent_id,\n new_group)\n return id, name\n\n def delete_group(self, id, target_id):\n pass\n\n def define_nodes(self, position_x: List[int], position_y: List[int],\n position_z: List[int], id: List[int]=None, radius: Optional[List[\n int]]=None, rotation_x: Optional[List[float]]=None, rotation_y:\n Optional[List[float]]=None, rotation_z: Optional[List[float]]=None,\n inVP: Optional[List[int]]=None, inMag: Optional[List[int]]=None,\n bitDepth: Optional[List[int]]=None, interpolation: Optional[List[\n bool]]=None, time: Optional[List[int]]=None, comment: Optional[List\n [int]]=None) ->Nodes:\n \"\"\" Generates new nodes table from data.\n\n Args:\n position_x: Node position x\n position_y: Node position y\n position_z: Node position z\n id (optional): (Globally unique) Node id. 
Default: New unique ids are generated\n radius (optional): Node radius\n rotation_x (optional): Node rotation x\n rotation_y (optional): Node rotation y\n rotation_z (optional): Node rotation z\n inVP (optional): Viewport index in which node was placed\n inMag (optional): (De-)Magnification factor in which node was placed\n bitDepth (optional): Bit (Color) Depth in which node was placed\n interpolation (optional): Interpolation state in which node was placed\n time (optional): Time stamp at which node was placed\n comment (optional): Comment associated with node\n\n Returns:\n nodes: Nodes object\n\n \"\"\"\n if id is None:\n id_max = self.max_node_id()\n id = list(range(id_max + 1, id_max + len(position_x) + 1))\n nodes = Nodes.from_list(id, position_x, position_y, position_z,\n radius, rotation_x, rotation_y, rotation_z, inVP, inMag,\n bitDepth, interpolation, time, comment)\n return nodes\n\n def define_nodes_from_positions(self, positions: np.ndarray) ->Nodes:\n \"\"\" Generates new nodes table from positions only (node ids are generated automatically).\n\n Args:\n positions (N x 3): Numpy array holding the (x,y,z) positions to be returned as nodes in a Nodes table\n\n Returns:\n nodes: Nodes object\n\n \"\"\"\n id_max = self.max_node_id()\n id = np.array(range(id_max + 1, id_max + positions.shape[0] + 1)\n ).reshape(-1, 1)\n nodes = Nodes.from_numpy(np.append(id, positions, axis=1))\n return nodes\n <mask token>\n\n def get_distance_to_nodes(self, position: Union[Tuple[int, int, int],\n np.ndarray], tree_idx: int, unit: str='um') ->List[np.ndarray]:\n \"\"\" Get the (euclidean) distances from the nodes of the specified tree to the provided (x,y,z) position\n\n Args:\n position (1 x 3): Target (x,y,z) position to which the node distances should be computed\n tree_idx: Tree idx for which node distances should be computed\n unit (optional): Unit flag specifying in which unit the distances should be returned.\n Options: 'vx' (voxels), 'nm' (nanometer), 'um' (micrometer). Default: 'um' (micrometer)\n\n Returns:\n distances: Array holding distances\n\n \"\"\"\n if type(position) is not np.ndarray:\n position = np.array(position)\n unit_factor = self._get_unit_factor(unit)\n distances = Skeleton.get_distance(np.array(self.nodes[tree_idx].\n position.values), position, unit_factor)\n return distances\n\n def get_graph(self, tree_idx):\n \"\"\" Returns the networkx graph representation of a tree.\n\n Args:\n tree_idx: Linear index of the tree to be returned as graph object\n\n Returns:\n graph: Graph object\n\n \"\"\"\n nodes = self.nodes[tree_idx]\n edges = self.edges[tree_idx]\n graph = Skeleton._get_graph(nodes, edges)\n return graph\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def node_idx_to_id(self, node_idx: int, tree_idx: int) ->int:\n \"\"\" Returns the node id for the provided tree and node idx.\"\"\"\n node_id = self.nodes[tree_idx].loc[node_idx, 'id'].values[0]\n return node_id\n <mask token>\n\n def max_group_id(self) ->int:\n \"\"\" Returns highest group id. 
If no groups are defined, return None\"\"\"\n group_ids = np.asarray(self.group_ids, dtype=np.float)\n if np.all(np.isnan(group_ids)):\n group_id = None\n else:\n group_id = int(np.nanmax(group_ids))\n return group_id\n\n def min_node_id(self) ->int:\n \"\"\" Returns lowest global node id.\"\"\"\n if len(self.nodes) > 0:\n min_node_id = min([(min(nodes.id) if len(nodes) > 0 else 0) for\n nodes in self.nodes])\n else:\n min_node_id = 0\n return min_node_id\n <mask token>\n\n def min_tree_id(self) ->int:\n \"\"\" Returns lowest global tree id.\"\"\"\n return min(self.tree_ids) if len(self.tree_ids) > 0 else 0\n <mask token>\n\n def num_trees(self) ->int:\n \"\"\"Returns number of trees contained in skeleton object.\"\"\"\n return len(self.nodes)\n <mask token>\n\n def _get_unit_factor(self, unit: str) ->np.ndarray:\n \"\"\" Returns factor for unit conversion\n\n Args:\n unit: Unit for which to return the conversion factor.\n Options: 'vx' (voxels), 'nm' (nanometer), 'um' (micrometer)\n\n Returns:\n unit_factor (shape=(3,)): Unit conversion factors\n \"\"\"\n unit_factors = {'vx': np.array((1, 1, 1)), 'nm': np.array(self.\n parameters.scale), 'um': np.array(self.parameters.scale) / 1000}\n assert unit in unit_factors.keys(), 'Invalid unit'\n unit_factor = unit_factors[unit]\n return unit_factor\n <mask token>\n\n def _reset_tree_ids(self, start_id: int):\n \"\"\" Resets tree ids of skeleton to begin with start value.\n\n Args:\n start_id: Start value to which the lowest tree id should be set.\n \"\"\"\n add_id = start_id - self.min_tree_id()\n self.tree_ids = [(tree_id + add_id) for tree_id in self.tree_ids]\n <mask token>\n <mask token>\n\n def _nml_to_skeleton(self, nml):\n \"\"\" Converts wknml to skeleton data structures.\"\"\"\n self.groups = nml.groups\n self.branchpoints = nml.branchpoints\n self.parameters = Parameters(**nml.parameters._asdict())\n for tree in nml.trees:\n self.add_tree(nodes=Skeleton._nml_nodes_to_nodes(nml_nodes=tree\n .nodes, nml_comments=nml.comments), edges=np.array([(edge.\n source, edge.target) for edge in tree.edges]), group_id=\n tree.groupId, name=tree.name, color=tree.color)\n\n def _skeleton_to_nml(self):\n \"\"\" Converts skeleton to wknml data structures.\"\"\"\n trees = []\n for tree_idx, tree_id in enumerate(self.tree_ids):\n nml_nodes = Skeleton._nodes_to_nml_nodes(self.nodes[tree_idx])\n nml_edges = Skeleton._edges_to_nml_edges(self.edges[tree_idx])\n tree = wknml.Tree(id=tree_id, color=self.colors[tree_idx], name\n =self.names[tree_idx], groupId=self.group_ids[tree_idx],\n nodes=nml_nodes, edges=nml_edges)\n trees.append(tree)\n nml = wknml.NML(parameters=wknml.NMLParameters(**self.parameters.\n _asdict()), trees=trees, branchpoints=self.branchpoints,\n comments=self._skeleton_to_nml_comments(), groups=self.groups)\n return nml\n\n def _skeleton_to_nml_comments(self):\n \"\"\" Converts skeleton to wknml comments.\"\"\"\n nml_comments = []\n for nodes in self.nodes:\n comment_nodes = nodes[nodes['comment'].notnull()]\n for _, row in comment_nodes.iterrows():\n nml_comment = wknml.Comment(node=row['id'].values[0],\n content=row['comment'].values[0])\n nml_comments.append(nml_comment)\n return nml_comments\n\n @staticmethod\n def define_parameters(name: str, scale: Tuple[float, float, float],\n offset: Tuple[float, float, float]=(0, 0, 0), time: int=0,\n editPosition: Tuple[float, float, float]=(1.0, 1.0, 1.0),\n editRotation: Tuple[float, float, float]=(0.0, 0.0, 0.0), zoomLevel:\n float=1.0, taskBoundingBox: Tuple[int, int, int, int, int, int]=\n 
None, userBoundingBox: Tuple[int, int, int, int, int, int]=None\n ) ->Parameters:\n parameters = Parameters(name=name, scale=scale, offset=offset, time\n =time, editPosition=editPosition, editRotation=editRotation,\n zoomLevel=zoomLevel, taskBoundingBox=taskBoundingBox,\n userBoundingBox=userBoundingBox)\n return parameters\n <mask token>\n\n @staticmethod\n def _nml_nodes_to_nodes(nml_nodes, nml_comments):\n \"\"\" Converts wknml nodes (list of named tuples) to skeleton nodes (DataFrame subclass).\"\"\"\n data = [(node.id, node.position[0], node.position[1], node.position\n [2], node.radius, node.rotation[0], node.rotation[1], node.\n rotation[2], node.inVp, node.inMag, node.bitDepth, node.\n interpolation, node.time, np.nan) for node in nml_nodes]\n nodes = Nodes(data=data)\n comment_node_ids = [comment.node for comment in nml_comments]\n comment_strings = [comment.content for comment in nml_comments]\n nodes_ids_comments = nodes.id[nodes.id.isin(comment_node_ids)]\n for id in nodes_ids_comments:\n id_comment = comment_strings[comment_node_ids.index(id)]\n nodes.loc[nodes.id == id, ('comment', '')] = id_comment\n return nodes\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @staticmethod\n def _group_modify_id(group, id_modifier):\n \"\"\" Modifies group ids with the passed id_modifier (e.g. lambda) function.\"\"\"\n group = group._replace(id=id_modifier(group.id))\n group = group._replace(children=list(map(lambda g: Skeleton.\n _group_modify_id(g, id_modifier), group.children)))\n return group\n\n @staticmethod\n def _group_get_ids(groups, ids=[]):\n for group in groups:\n ids.append(group.id)\n Skeleton._group_get_ids(group.children, ids)\n return groups, ids\n\n @staticmethod\n def _get_graph(nodes: Nodes, edges: np.ndarray):\n \"\"\" Returns the networkx graph representation of provided nodes and edges.\"\"\"\n graph = nx.Graph()\n graph.add_nodes_from(nodes['id'])\n attrs = nodes.set_index('id').to_dict('index')\n nx.set_node_attributes(graph, attrs)\n graph.add_edges_from(edges)\n return graph\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Skeleton:\n <mask token>\n <mask token>\n\n def __init__(self, nml_path: str=None, parameters: Parameters=None,\n strict=True):\n \"\"\" The Skeleton constructor expects either a path to a nml file or a Parameters object as input arguments\n\n Args:\n nml_path: Path to nml file. If constructed via an nml file, the skeleton object is populated with all the\n trees and additional properties specified in the .nml file\n parameters (optional): Parameters (wkskel.types.Parameters) specifying the most rudimentary properties\n of the skeleton.\n strict (optional): Controls assertions ensuring that resulting skeleton objects are compatible with\n webKnossos. Default: True\n\n Examples:\n Using nml_path:\n nml_path = '/path/to/example.nml'\n skel = Skeleton(nml_path)\n\n Using parameters:\n parameters = Skeleton.define_parameters(name=\"2017-01-12_FD0156-2\", scale=(11.24, 11.24, 32))\n skel = Skeleton(parameters=parameters)\n \"\"\"\n assert (nml_path is not None) ^ (parameters is not None\n ), 'To construct a skeleton object, either a path to a nml file or the skeleton parameters need to passed'\n self.nodes = list()\n self.edges = list()\n self.names = list()\n self.colors = list()\n self.tree_ids = list()\n self.group_ids = list()\n self.groups = list()\n self.branchpoints = list()\n self.parameters = Parameters()\n self.nml_path = str()\n self.strict = strict\n self.defaults = self.DEFAULTS\n if nml_path is not None:\n assert os.path.exists(nml_path), 'not a valid path: {}'.format(\n nml_path)\n try:\n with open(nml_path, 'rb') as f:\n nml = wknml.parse_nml(f)\n except IOError:\n print('not a valid nml file: {}'.format(nml_path))\n self._nml_to_skeleton(nml)\n else:\n assert type(parameters\n ) is Parameters, 'provided parameters must be of type wkskel.types.Parameters'\n self._parameters_to_skeleton(parameters)\n\n def add_tree(self, nodes: Nodes=Nodes(), edges: Union[List[Tuple[int,\n int]], np.ndarray]=None, tree_id: int=None, name: str='', group_id:\n int=None, color: Tuple[float, float, float, float]=None):\n \"\"\" Appends new tree to skeleton.\n\n Args:\n nodes (optional): Nodes representing tree to be added\n edges (optional): Edges representing tree to be added\n tree_id (optional): Tree id to be used for new tree. Default: Highest current tree id + 1\n name (optional): Name to be used for new tree. Default: Empty str\n group_id (optional): Group id to be used for new tree. If passed group id does not exist, it is created.\n Default: None\n color (optional): Color to be used for new tree specified as (r, g, b, alpha). 
Default: (0, 0, 0, 1)\n \"\"\"\n if edges is None:\n edges = np.empty((0, 2), dtype=np.uint32)\n elif type(edges) is list:\n edges = np.asarray(edges)\n if self.strict & (len(nodes) > 1):\n assert Skeleton._num_conn_comp(Skeleton._get_graph(nodes, edges)\n ) == 1, 'Added tree consists of more than one connected component'\n if tree_id is None:\n tree_id = self.max_tree_id() + 1\n if (group_id is not None) & (group_id not in self.groups_ids()):\n self.add_group(id=group_id)\n if color is None:\n color = self.defaults['tree']['color']\n self.nodes.append(nodes)\n self.edges.append(edges)\n self.tree_ids.append(tree_id)\n self.group_ids.append(group_id)\n self.names.append(name)\n self.colors.append(color)\n\n def add_tree_from_skel(self, skel: 'Skeleton', tree_idx: int, group_id:\n int=None, name: str=None):\n \"\"\" Appends a specific tree contained in a different skeleton object to the skeleton.\n\n Args:\n skel: Source skeleton object (different from the one calling this method) to be added\n tree_idx: Source tree index of tree to be added\n group_id (optional): Target group id to which the added tree should be assigned. Default: None\n name (optional): Target name for the added tree\n \"\"\"\n if group_id not in self.groups_ids():\n self.add_group(id=group_id)\n if name is None:\n name = skel.names[tree_idx]\n skel._reset_node_ids(self.max_node_id() + 1)\n skel._reset_tree_ids(self.max_tree_id() + 1)\n self.nodes = self.nodes + [skel.nodes[tree_idx]]\n self.edges = self.edges + [skel.edges[tree_idx]]\n self.tree_ids = self.tree_ids + [skel.tree_ids[tree_idx]]\n self.group_ids = self.group_ids + [group_id]\n self.names = self.names + [name]\n self.colors = self.colors + [skel.colors[tree_idx]]\n return self\n\n def add_trees_from_skel(self, skel: 'Skeleton'):\n \"\"\" Appends all trees contained in a different skeleton object to the skeleton.\n\n This method attempts to preserve the relative group structure found in the skeleton object to be added\n\n Args:\n skel: Source skeleton object (different from the one calling this method) to be added\n \"\"\"\n skel._reset_node_ids(self.max_node_id() + 1)\n skel._reset_tree_ids(self.max_tree_id() + 1)\n max_group_id = self.max_group_id()\n if max_group_id is not None:\n skel._reset_group_ids(max_group_id + 1)\n self.nodes = self.nodes + skel.nodes\n self.edges = self.edges + skel.edges\n self.tree_ids = self.tree_ids + skel.tree_ids\n self.group_ids = self.group_ids + skel.group_ids\n self.groups = self.groups + skel.groups\n self.names = self.names + skel.names\n self.colors = self.colors + skel.colors\n return self\n\n def add_nodes_as_trees(self, nodes: Nodes, tree_ids: List[int]=None,\n group_ids: List[int]=None, names: List[str]=None, colors: List[\n Tuple[float, float, float, float]]=None):\n \"\"\" Appends each of the specified nodes as separate trees to the skeleton (1 node each).\n\n Args:\n nodes: Nodes representing the trees to be added\n tree_ids (optional): Tree ids to be assigned to the newly added trees. Default: Global max + [1, n]\n group_ids (optional): Group ids to be assigned to the newly added trees. Default: None\n names (optional): Names to be assigned to the newly added trees.\n colors (optional): Colors to be used for the new trees specified as (r, g, b, alpha). 
Default: (0, 0, 0, 1)\n \"\"\"\n if tree_ids is None:\n tree_id_start = self.max_tree_id() + 1\n tree_id_end = tree_id_start + len(nodes)\n tree_ids = list(range(tree_id_start, tree_id_end))\n if group_ids is None:\n group_ids = [None for x in range(len(nodes))]\n if names is None:\n names = ['' for x in range(len(nodes))]\n if colors is None:\n colors = [(0.0, 0.0, 0.0, 1.0) for x in range(len(nodes))]\n for node_idx, _ in nodes.iterrows():\n self.add_tree(nodes=nodes[node_idx:node_idx + 1], tree_id=\n tree_ids[node_idx], group_id=group_ids[node_idx], name=\n names[node_idx], color=colors[node_idx])\n\n def delete_tree(self, idx: int=None, id: int=None):\n \"\"\" Deletes tree with specified idx or id.\n\n Args:\n idx: Linear index of tree to be deleted\n id: Id of tree to be deleted\n\n \"\"\"\n if id is not None:\n idx = self.tree_ids.index(id)\n self.nodes.pop(idx)\n self.edges.pop(idx)\n self.names.pop(idx)\n self.colors.pop(idx)\n self.tree_ids.pop(idx)\n self.group_ids.pop(idx)\n\n def add_group(self, parent_id: int=None, id: int=None, name: str=None):\n \"\"\" Adds a new group to skeleton object.\n\n Args:\n parent_id: Parent group id to which new group is added as a child. Default: None (root group)\n id: Id of new group to be added. Default: Current max group id + 1\n name: Name of new group to be added. Default: 'Group {}'.format(id)\n\n Returns:\n id: Id of added group\n name: Name of added group\n\n \"\"\"\n if parent_id is not None:\n assert parent_id in self.group_ids, 'Parent id does not exist'\n if id is None:\n id = int(np.nanmax(np.asarray(self.group_ids, dtype=np.float)) + 1)\n else:\n assert id not in self.groups_ids(), 'Id already exists'\n if name is None:\n name = 'Group {}'.format(id)\n new_group = wknml.Group(id, name, [])\n if parent_id is None:\n self.groups.append(new_group)\n else:\n self.groups = Skeleton._group_append(self.groups, parent_id,\n new_group)\n return id, name\n\n def delete_group(self, id, target_id):\n pass\n\n def define_nodes(self, position_x: List[int], position_y: List[int],\n position_z: List[int], id: List[int]=None, radius: Optional[List[\n int]]=None, rotation_x: Optional[List[float]]=None, rotation_y:\n Optional[List[float]]=None, rotation_z: Optional[List[float]]=None,\n inVP: Optional[List[int]]=None, inMag: Optional[List[int]]=None,\n bitDepth: Optional[List[int]]=None, interpolation: Optional[List[\n bool]]=None, time: Optional[List[int]]=None, comment: Optional[List\n [int]]=None) ->Nodes:\n \"\"\" Generates new nodes table from data.\n\n Args:\n position_x: Node position x\n position_y: Node position y\n position_z: Node position z\n id (optional): (Globally unique) Node id. 
Default: New unique ids are generated\n radius (optional): Node radius\n rotation_x (optional): Node rotation x\n rotation_y (optional): Node rotation y\n rotation_z (optional): Node rotation z\n inVP (optional): Viewport index in which node was placed\n inMag (optional): (De-)Magnification factor in which node was placed\n bitDepth (optional): Bit (Color) Depth in which node was placed\n interpolation (optional): Interpolation state in which node was placed\n time (optional): Time stamp at which node was placed\n comment (optional): Comment associated with node\n\n Returns:\n nodes: Nodes object\n\n \"\"\"\n if id is None:\n id_max = self.max_node_id()\n id = list(range(id_max + 1, id_max + len(position_x) + 1))\n nodes = Nodes.from_list(id, position_x, position_y, position_z,\n radius, rotation_x, rotation_y, rotation_z, inVP, inMag,\n bitDepth, interpolation, time, comment)\n return nodes\n\n def define_nodes_from_positions(self, positions: np.ndarray) ->Nodes:\n \"\"\" Generates new nodes table from positions only (node ids are generated automatically).\n\n Args:\n positions (N x 3): Numpy array holding the (x,y,z) positions to be returned as nodes in a Nodes table\n\n Returns:\n nodes: Nodes object\n\n \"\"\"\n id_max = self.max_node_id()\n id = np.array(range(id_max + 1, id_max + positions.shape[0] + 1)\n ).reshape(-1, 1)\n nodes = Nodes.from_numpy(np.append(id, positions, axis=1))\n return nodes\n\n def get_distances_to_node(self, positions: Union[Sequence[Tuple[int,\n int, int]], np.ndarray], node_id: int=None, tree_idx: int=None,\n node_idx: int=None, unit: str='um') ->List[np.ndarray]:\n \"\"\" Get the (euclidean) distances from the specified node to the provided (x,y,z) positions\n\n Args:\n positions (N x 3): Target (x,y,z) positions to which the distances should be computed\n node_id: Node id of the node for which the distances should be computed\n tree_idx: Tree idx of the node for which the distances should be computed\n node_idx: Node idx of the node for which the distances should be computed\n unit (optional): Unit flag specifying in which unit the distances should be returned.\n Options: 'vx' (voxels), 'nm' (nanometer), 'um' (micrometer). Default: 'um' (micrometer)\n\n Returns:\n distances: Array holding distances\n\n \"\"\"\n assert (node_id is not None) ^ (tree_idx is not None) & (node_idx\n is not None\n ), 'Either provide node_id or both tree_idx and node_idx'\n if type(positions) is not np.ndarray:\n positions = np.array(positions)\n if node_id is not None:\n node_idx, tree_idx = self.node_id_to_idx(node_id)\n unit_factor = self._get_unit_factor(unit)\n distances = Skeleton.get_distance(positions, np.array(self.nodes[\n tree_idx].position.values[node_idx]), unit_factor)\n return distances\n\n def get_distance_to_nodes(self, position: Union[Tuple[int, int, int],\n np.ndarray], tree_idx: int, unit: str='um') ->List[np.ndarray]:\n \"\"\" Get the (euclidean) distances from the nodes of the specified tree to the provided (x,y,z) position\n\n Args:\n position (1 x 3): Target (x,y,z) position to which the node distances should be computed\n tree_idx: Tree idx for which node distances should be computed\n unit (optional): Unit flag specifying in which unit the distances should be returned.\n Options: 'vx' (voxels), 'nm' (nanometer), 'um' (micrometer). 
Default: 'um' (micrometer)\n\n Returns:\n distances: Array holding distances\n\n \"\"\"\n if type(position) is not np.ndarray:\n position = np.array(position)\n unit_factor = self._get_unit_factor(unit)\n distances = Skeleton.get_distance(np.array(self.nodes[tree_idx].\n position.values), position, unit_factor)\n return distances\n\n def get_graph(self, tree_idx):\n \"\"\" Returns the networkx graph representation of a tree.\n\n Args:\n tree_idx: Linear index of the tree to be returned as graph object\n\n Returns:\n graph: Graph object\n\n \"\"\"\n nodes = self.nodes[tree_idx]\n edges = self.edges[tree_idx]\n graph = Skeleton._get_graph(nodes, edges)\n return graph\n\n def get_shortest_path(self, node_id_start: int, node_id_end: int) ->List[\n int]:\n \"\"\" Returns the shortest path between two nodes of a tree.\n\n Args:\n node_id_start: Node id of start node\n node_id_end: Node id of end node\n\n Returns:\n shortest_path: Node indices comprising the shortest path\n\n \"\"\"\n _, tree_idx_start = self.node_id_to_idx(node_id_start)\n _, tree_idx_end = self.node_id_to_idx(node_id_end)\n assert tree_idx_start == tree_idx_end, 'Provided node ids need to be part of the same tree'\n graph = self.get_graph(tree_idx_start)\n shortest_path = nx.shortest_path(graph, node_id_start, node_id_end)\n return shortest_path\n\n def plot(self, tree_inds: Union[int, List[int]]=None, view: str=None,\n colors: Union[Tuple[float, float, float, float], List[Tuple[float,\n float, float, float]], str]=None, unit: str='um', show: bool=True,\n ax: plt.axes=None):\n \"\"\" Generates a (3D) line plot of the trees contained in the skeleton object.\n\n Args:\n tree_inds (optional): Tree indices to be plotted.\n Default: All trees are plotted\n view (optional): Plot as 2D projection on orthonormal plane.\n Options: 'xy', 'xz', 'yz'\n Default: Plot as 3D projection\n colors (optional): Colors in which trees should be plotted. If only one RGBA tuple is specified, it is\n broadcasted over all trees. Alternatively, a list providing RGBA tuples for each tree can be passed.\n Lastly, the name of a mnatplotlib colormap (https://matplotlib.org/tutorials/colors/colormaps.html) can\n be passed as a str.\n Default: Skeleton colors (self.colors) are used\n unit (optional): Specifies in which unit the plot should be generated.\n Options: 'vx' (voxels), 'nm' (nanometer), 'um' (micrometer).\n Default: 'um' (micrometer)\n show (optional): Displays the plot in an interactive window. For repeatedly plotting on the same axes, set\n to False. 
Default: True\n ax: Axes to be plotted on.\n\n Returns:\n ax: Axes which was plotted on\n \"\"\"\n if tree_inds is None:\n tree_inds = list(range(len(self.nodes)))\n elif tree_inds is int:\n tree_inds = [tree_inds]\n if colors is None:\n colors = self.colors\n elif type(colors) is str:\n cmap = cm.get_cmap(colors)\n colors = [cmap(x) for x in np.linspace(0, 1, self.num_trees())]\n elif type(colors[0]) is not Sequence:\n colors = [colors] * self.num_trees()\n unit_factor = self._get_unit_factor(unit)\n allowed_views = ['xy', 'xz', 'yz']\n if view is not None:\n assert view in allowed_views, 'The passed view argument: {} is not among the allowed views: {}'.format(\n view, allowed_views)\n if ax is None:\n fig = plt.figure()\n if view is None:\n ax = fig.add_subplot(111, projection='3d')\n else:\n ax = fig.add_subplot(111, projection='rectilinear')\n elif view is None:\n assert ax.name == '3d', 'To generate a 3D skeleton plot, the projection type of the passed axes must be 3D'\n else:\n assert ax.name != '3d', 'To generate a 2D skeleton plot, the projection type of the passed axes must be rectilinear'\n lims_min = []\n lims_max = []\n for tree_idx in tree_inds:\n edges = self.edges[tree_idx].copy()\n nodes = self.nodes[tree_idx].copy()\n if len(nodes) > 0:\n nodes['position'] = nodes['position'].multiply(unit_factor)\n if view == 'xy':\n nodes = nodes.drop([('position', 'z')], axis=1)\n elif view == 'xz':\n nodes = nodes.drop([('position', 'y')], axis=1)\n elif view == 'yz':\n nodes = nodes.drop([('position', 'x')], axis=1)\n lims_min.append(np.min(nodes['position'].values, axis=0))\n lims_max.append(np.max(nodes['position'].values, axis=0))\n segments = []\n for edge in edges:\n n0 = nodes['position'][nodes.id == edge[0]].values[0]\n n1 = nodes['position'][nodes.id == edge[1]].values[0]\n segment = [[c for c in n0], [c for c in n1]]\n segments.append(segment)\n if view is None:\n line_collection = art3d.Line3DCollection(segments=\n segments, colors=colors[tree_idx])\n ax.add_collection3d(line_collection)\n else:\n line_collection = LineCollection(segments=segments,\n colors=colors[tree_idx])\n ax.add_collection(line_collection)\n lim_min = np.min(np.array(lims_min), axis=0)\n lim_max = np.max(np.array(lims_max), axis=0)\n ax.set_xlim(lim_min[0], lim_max[0])\n ax.set_ylim(lim_min[1], lim_max[1])\n if view is None:\n ax.set_zlim(lim_min[2], lim_max[2])\n else:\n ax.set_aspect('equal')\n if show:\n plt.show()\n return ax\n\n def write_nml(self, nml_write_path):\n \"\"\" Writes the present state of the skeleton object to a .nml file.\n\n Args:\n nml_write_path: Path to which .nml file should be written\n\n \"\"\"\n if self.num_trees() == 0:\n self.add_tree()\n nml = self._skeleton_to_nml()\n with open(nml_write_path, 'wb') as f:\n wknml.write_nml(f, nml)\n\n def node_id_to_idx(self, node_id: int) ->(int, int):\n \"\"\" Returns the linear tree and node indices for the provided node id.\"\"\"\n node_idx = None\n for tree_idx, nodes in enumerate(self.nodes):\n index_list = nodes[nodes['id'] == node_id].index.tolist()\n if index_list:\n node_idx = index_list[0]\n break\n assert node_idx is not None, 'node id {} does not exist'.format(node_id\n )\n return node_idx, tree_idx\n\n def node_idx_to_id(self, node_idx: int, tree_idx: int) ->int:\n \"\"\" Returns the node id for the provided tree and node idx.\"\"\"\n node_id = self.nodes[tree_idx].loc[node_idx, 'id'].values[0]\n return node_id\n\n def min_group_id(self) ->int:\n \"\"\" Returns lowest group id. 
If no groups are defined, return None\"\"\"\n group_ids = np.asarray(self.group_ids, dtype=np.float)\n if np.all(np.isnan(group_ids)):\n group_id = None\n else:\n group_id = int(np.nanmin(group_ids))\n return group_id\n\n def max_group_id(self) ->int:\n \"\"\" Returns highest group id. If no groups are defined, return None\"\"\"\n group_ids = np.asarray(self.group_ids, dtype=np.float)\n if np.all(np.isnan(group_ids)):\n group_id = None\n else:\n group_id = int(np.nanmax(group_ids))\n return group_id\n\n def min_node_id(self) ->int:\n \"\"\" Returns lowest global node id.\"\"\"\n if len(self.nodes) > 0:\n min_node_id = min([(min(nodes.id) if len(nodes) > 0 else 0) for\n nodes in self.nodes])\n else:\n min_node_id = 0\n return min_node_id\n\n def max_node_id(self) ->int:\n \"\"\" Returns highest global node id.\"\"\"\n if len(self.nodes) > 0:\n max_node_id = max([(max(nodes.id) if len(nodes) > 0 else 0) for\n nodes in self.nodes])\n else:\n max_node_id = 0\n return max_node_id\n\n def min_tree_id(self) ->int:\n \"\"\" Returns lowest global tree id.\"\"\"\n return min(self.tree_ids) if len(self.tree_ids) > 0 else 0\n\n def max_tree_id(self) ->int:\n \"\"\" Returns highest global tree id.\"\"\"\n return max(self.tree_ids) if len(self.tree_ids) > 0 else 0\n\n def num_trees(self) ->int:\n \"\"\"Returns number of trees contained in skeleton object.\"\"\"\n return len(self.nodes)\n\n def groups_ids(self) ->List[int]:\n \"\"\" Returns all ids defined in groups tree\"\"\"\n _, groups_ids = Skeleton._group_get_ids(self.groups)\n return groups_ids\n\n def _get_unit_factor(self, unit: str) ->np.ndarray:\n \"\"\" Returns factor for unit conversion\n\n Args:\n unit: Unit for which to return the conversion factor.\n Options: 'vx' (voxels), 'nm' (nanometer), 'um' (micrometer)\n\n Returns:\n unit_factor (shape=(3,)): Unit conversion factors\n \"\"\"\n unit_factors = {'vx': np.array((1, 1, 1)), 'nm': np.array(self.\n parameters.scale), 'um': np.array(self.parameters.scale) / 1000}\n assert unit in unit_factors.keys(), 'Invalid unit'\n unit_factor = unit_factors[unit]\n return unit_factor\n\n def _reset_node_ids(self, start_id: int):\n \"\"\" Resets node ids of skeleton to begin with start value.\n\n Args:\n start_id: Start value to which the lowest node id should be set.\n \"\"\"\n add_id = start_id - self.min_node_id()\n for tree_idx, _ in enumerate(self.nodes):\n self.nodes[tree_idx].nodes['id'] += add_id\n self.edges[tree_idx] += add_id\n\n def _reset_tree_ids(self, start_id: int):\n \"\"\" Resets tree ids of skeleton to begin with start value.\n\n Args:\n start_id: Start value to which the lowest tree id should be set.\n \"\"\"\n add_id = start_id - self.min_tree_id()\n self.tree_ids = [(tree_id + add_id) for tree_id in self.tree_ids]\n\n def _reset_group_ids(self, start_id: int):\n \"\"\" Resets group ids of skeleton to begin with start value.\n\n Args:\n start_id: Start value to which the lowest group id should be set.\n \"\"\"\n min_group_id = self.min_group_id()\n if min_group_id is not None:\n add_id = start_id - min_group_id\n self.group_ids = [(i + add_id if i is not None else i) for i in\n self.group_ids]\n self.groups = [Skeleton._group_modify_id(group, id_modifier=lambda\n x: x + add_id) for group in self.groups]\n <mask token>\n\n def _nml_to_skeleton(self, nml):\n \"\"\" Converts wknml to skeleton data structures.\"\"\"\n self.groups = nml.groups\n self.branchpoints = nml.branchpoints\n self.parameters = Parameters(**nml.parameters._asdict())\n for tree in nml.trees:\n 
self.add_tree(nodes=Skeleton._nml_nodes_to_nodes(nml_nodes=tree\n .nodes, nml_comments=nml.comments), edges=np.array([(edge.\n source, edge.target) for edge in tree.edges]), group_id=\n tree.groupId, name=tree.name, color=tree.color)\n\n def _skeleton_to_nml(self):\n \"\"\" Converts skeleton to wknml data structures.\"\"\"\n trees = []\n for tree_idx, tree_id in enumerate(self.tree_ids):\n nml_nodes = Skeleton._nodes_to_nml_nodes(self.nodes[tree_idx])\n nml_edges = Skeleton._edges_to_nml_edges(self.edges[tree_idx])\n tree = wknml.Tree(id=tree_id, color=self.colors[tree_idx], name\n =self.names[tree_idx], groupId=self.group_ids[tree_idx],\n nodes=nml_nodes, edges=nml_edges)\n trees.append(tree)\n nml = wknml.NML(parameters=wknml.NMLParameters(**self.parameters.\n _asdict()), trees=trees, branchpoints=self.branchpoints,\n comments=self._skeleton_to_nml_comments(), groups=self.groups)\n return nml\n\n def _skeleton_to_nml_comments(self):\n \"\"\" Converts skeleton to wknml comments.\"\"\"\n nml_comments = []\n for nodes in self.nodes:\n comment_nodes = nodes[nodes['comment'].notnull()]\n for _, row in comment_nodes.iterrows():\n nml_comment = wknml.Comment(node=row['id'].values[0],\n content=row['comment'].values[0])\n nml_comments.append(nml_comment)\n return nml_comments\n\n @staticmethod\n def define_parameters(name: str, scale: Tuple[float, float, float],\n offset: Tuple[float, float, float]=(0, 0, 0), time: int=0,\n editPosition: Tuple[float, float, float]=(1.0, 1.0, 1.0),\n editRotation: Tuple[float, float, float]=(0.0, 0.0, 0.0), zoomLevel:\n float=1.0, taskBoundingBox: Tuple[int, int, int, int, int, int]=\n None, userBoundingBox: Tuple[int, int, int, int, int, int]=None\n ) ->Parameters:\n parameters = Parameters(name=name, scale=scale, offset=offset, time\n =time, editPosition=editPosition, editRotation=editRotation,\n zoomLevel=zoomLevel, taskBoundingBox=taskBoundingBox,\n userBoundingBox=userBoundingBox)\n return parameters\n\n @staticmethod\n def get_distance(positions: np.ndarray, position: np.ndarray,\n unit_factor: np.ndarray=None):\n \"\"\" Get the (euclidean) distances between positions and a target position\n\n Args:\n positions (N x 3): Array holding (multiple) x, y, z positions\n position (1 x 3): Array holding x, y, z position to which the distances should be computed\n unit_factors (1 x 3 Array, optional): Conversion factors with which distances are multiplied. 
Default (1,1,1)\n\n Returns:\n distances: Arrays holding distances\n\n \"\"\"\n if unit_factor is None:\n unit_factor = np.array([1, 1, 1])\n distances = np.sqrt(np.sum(((positions - position) * unit_factor.\n reshape(1, 3)) ** 2, axis=1))\n return distances\n\n @staticmethod\n def _nml_nodes_to_nodes(nml_nodes, nml_comments):\n \"\"\" Converts wknml nodes (list of named tuples) to skeleton nodes (DataFrame subclass).\"\"\"\n data = [(node.id, node.position[0], node.position[1], node.position\n [2], node.radius, node.rotation[0], node.rotation[1], node.\n rotation[2], node.inVp, node.inMag, node.bitDepth, node.\n interpolation, node.time, np.nan) for node in nml_nodes]\n nodes = Nodes(data=data)\n comment_node_ids = [comment.node for comment in nml_comments]\n comment_strings = [comment.content for comment in nml_comments]\n nodes_ids_comments = nodes.id[nodes.id.isin(comment_node_ids)]\n for id in nodes_ids_comments:\n id_comment = comment_strings[comment_node_ids.index(id)]\n nodes.loc[nodes.id == id, ('comment', '')] = id_comment\n return nodes\n\n @staticmethod\n def _nodes_to_nml_nodes(nodes):\n \"\"\" Converts skeleton nodes (DataFrame subclass) to wknml nodes (list of named tuples).\"\"\"\n nml_nodes = []\n for idx, row in nodes.iterrows():\n nml_node = wknml.Node(id=int(row.id), position=tuple(row.\n position.values), radius=float(row.radius), rotation=tuple(\n row.rotation.values), inVp=int(row.inVp), inMag=int(row.\n inMag), bitDepth=int(row.bitDepth), interpolation=bool(row.\n interpolation.values), time=int(row.time))\n nml_nodes.append(nml_node)\n return nml_nodes\n <mask token>\n <mask token>\n\n @staticmethod\n def _group_parent(groups, id, parent_id=None, parent_idx=None,\n child_idx=None):\n \"\"\" Returns the id of the parent group for a (child) group with specified id.\"\"\"\n for group in groups:\n if id in [x.id for x in group.children]:\n parent_id = group.id\n parent_idx = groups.index(group)\n child_idx = [x.id for x in group.children].index(id)\n else:\n parent_id, parent_idx, child_idx = Skeleton._group_parent(group\n .children, id, parent_id, parent_idx, child_idx)\n return parent_id, parent_idx, child_idx\n\n @staticmethod\n def _group_modify_id(group, id_modifier):\n \"\"\" Modifies group ids with the passed id_modifier (e.g. lambda) function.\"\"\"\n group = group._replace(id=id_modifier(group.id))\n group = group._replace(children=list(map(lambda g: Skeleton.\n _group_modify_id(g, id_modifier), group.children)))\n return group\n\n @staticmethod\n def _group_get_ids(groups, ids=[]):\n for group in groups:\n ids.append(group.id)\n Skeleton._group_get_ids(group.children, ids)\n return groups, ids\n\n @staticmethod\n def _get_graph(nodes: Nodes, edges: np.ndarray):\n \"\"\" Returns the networkx graph representation of provided nodes and edges.\"\"\"\n graph = nx.Graph()\n graph.add_nodes_from(nodes['id'])\n attrs = nodes.set_index('id').to_dict('index')\n nx.set_node_attributes(graph, attrs)\n graph.add_edges_from(edges)\n return graph\n\n @staticmethod\n def _num_conn_comp(graph):\n \"\"\" Returns number of connected components for graph\"\"\"\n return nx.number_connected_components(graph)\n",
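A note on the distance helpers above: get_distance together with _get_unit_factor reduces to a single vectorised numpy expression — subtract the target position from all node positions, scale each axis by the dataset voxel size for the requested unit, and take the row-wise euclidean norm. Below is a minimal standalone sketch of that computation; the function name distances_to_target is made up for illustration, and the 11.24 x 11.24 x 32 scale is the one used in the constructor docstring example, not a required value.

import numpy as np

def distances_to_target(positions, target, scale=(11.24, 11.24, 32), unit='um'):
    """Row-wise euclidean distances from N x 3 voxel positions to one target position."""
    # Same unit table as _get_unit_factor: voxels, nanometer, micrometer
    unit_factors = {'vx': np.ones(3),
                    'nm': np.asarray(scale, dtype=float),
                    'um': np.asarray(scale, dtype=float) / 1000}
    factor = unit_factors[unit]
    positions = np.asarray(positions, dtype=float)
    target = np.asarray(target, dtype=float)
    # Mirrors get_distance: per-axis scaling, then euclidean norm along each row
    return np.sqrt(np.sum(((positions - target) * factor.reshape(1, 3)) ** 2, axis=1))

# Example: two nodes, one voxel apart along z, reported in micrometer
print(distances_to_target([[0, 0, 0], [0, 0, 1]], [0, 0, 0], unit='um'))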
"step-3": "<mask token>\n\n\nclass Skeleton:\n <mask token>\n <mask token>\n\n def __init__(self, nml_path: str=None, parameters: Parameters=None,\n strict=True):\n \"\"\" The Skeleton constructor expects either a path to a nml file or a Parameters object as input arguments\n\n Args:\n nml_path: Path to nml file. If constructed via an nml file, the skeleton object is populated with all the\n trees and additional properties specified in the .nml file\n parameters (optional): Parameters (wkskel.types.Parameters) specifying the most rudimentary properties\n of the skeleton.\n strict (optional): Controls assertions ensuring that resulting skeleton objects are compatible with\n webKnossos. Default: True\n\n Examples:\n Using nml_path:\n nml_path = '/path/to/example.nml'\n skel = Skeleton(nml_path)\n\n Using parameters:\n parameters = Skeleton.define_parameters(name=\"2017-01-12_FD0156-2\", scale=(11.24, 11.24, 32))\n skel = Skeleton(parameters=parameters)\n \"\"\"\n assert (nml_path is not None) ^ (parameters is not None\n ), 'To construct a skeleton object, either a path to a nml file or the skeleton parameters need to passed'\n self.nodes = list()\n self.edges = list()\n self.names = list()\n self.colors = list()\n self.tree_ids = list()\n self.group_ids = list()\n self.groups = list()\n self.branchpoints = list()\n self.parameters = Parameters()\n self.nml_path = str()\n self.strict = strict\n self.defaults = self.DEFAULTS\n if nml_path is not None:\n assert os.path.exists(nml_path), 'not a valid path: {}'.format(\n nml_path)\n try:\n with open(nml_path, 'rb') as f:\n nml = wknml.parse_nml(f)\n except IOError:\n print('not a valid nml file: {}'.format(nml_path))\n self._nml_to_skeleton(nml)\n else:\n assert type(parameters\n ) is Parameters, 'provided parameters must be of type wkskel.types.Parameters'\n self._parameters_to_skeleton(parameters)\n\n def add_tree(self, nodes: Nodes=Nodes(), edges: Union[List[Tuple[int,\n int]], np.ndarray]=None, tree_id: int=None, name: str='', group_id:\n int=None, color: Tuple[float, float, float, float]=None):\n \"\"\" Appends new tree to skeleton.\n\n Args:\n nodes (optional): Nodes representing tree to be added\n edges (optional): Edges representing tree to be added\n tree_id (optional): Tree id to be used for new tree. Default: Highest current tree id + 1\n name (optional): Name to be used for new tree. Default: Empty str\n group_id (optional): Group id to be used for new tree. If passed group id does not exist, it is created.\n Default: None\n color (optional): Color to be used for new tree specified as (r, g, b, alpha). 
Default: (0, 0, 0, 1)\n \"\"\"\n if edges is None:\n edges = np.empty((0, 2), dtype=np.uint32)\n elif type(edges) is list:\n edges = np.asarray(edges)\n if self.strict & (len(nodes) > 1):\n assert Skeleton._num_conn_comp(Skeleton._get_graph(nodes, edges)\n ) == 1, 'Added tree consists of more than one connected component'\n if tree_id is None:\n tree_id = self.max_tree_id() + 1\n if (group_id is not None) & (group_id not in self.groups_ids()):\n self.add_group(id=group_id)\n if color is None:\n color = self.defaults['tree']['color']\n self.nodes.append(nodes)\n self.edges.append(edges)\n self.tree_ids.append(tree_id)\n self.group_ids.append(group_id)\n self.names.append(name)\n self.colors.append(color)\n\n def add_tree_from_skel(self, skel: 'Skeleton', tree_idx: int, group_id:\n int=None, name: str=None):\n \"\"\" Appends a specific tree contained in a different skeleton object to the skeleton.\n\n Args:\n skel: Source skeleton object (different from the one calling this method) to be added\n tree_idx: Source tree index of tree to be added\n group_id (optional): Target group id to which the added tree should be assigned. Default: None\n name (optional): Target name for the added tree\n \"\"\"\n if group_id not in self.groups_ids():\n self.add_group(id=group_id)\n if name is None:\n name = skel.names[tree_idx]\n skel._reset_node_ids(self.max_node_id() + 1)\n skel._reset_tree_ids(self.max_tree_id() + 1)\n self.nodes = self.nodes + [skel.nodes[tree_idx]]\n self.edges = self.edges + [skel.edges[tree_idx]]\n self.tree_ids = self.tree_ids + [skel.tree_ids[tree_idx]]\n self.group_ids = self.group_ids + [group_id]\n self.names = self.names + [name]\n self.colors = self.colors + [skel.colors[tree_idx]]\n return self\n\n def add_trees_from_skel(self, skel: 'Skeleton'):\n \"\"\" Appends all trees contained in a different skeleton object to the skeleton.\n\n This method attempts to preserve the relative group structure found in the skeleton object to be added\n\n Args:\n skel: Source skeleton object (different from the one calling this method) to be added\n \"\"\"\n skel._reset_node_ids(self.max_node_id() + 1)\n skel._reset_tree_ids(self.max_tree_id() + 1)\n max_group_id = self.max_group_id()\n if max_group_id is not None:\n skel._reset_group_ids(max_group_id + 1)\n self.nodes = self.nodes + skel.nodes\n self.edges = self.edges + skel.edges\n self.tree_ids = self.tree_ids + skel.tree_ids\n self.group_ids = self.group_ids + skel.group_ids\n self.groups = self.groups + skel.groups\n self.names = self.names + skel.names\n self.colors = self.colors + skel.colors\n return self\n\n def add_nodes_as_trees(self, nodes: Nodes, tree_ids: List[int]=None,\n group_ids: List[int]=None, names: List[str]=None, colors: List[\n Tuple[float, float, float, float]]=None):\n \"\"\" Appends each of the specified nodes as separate trees to the skeleton (1 node each).\n\n Args:\n nodes: Nodes representing the trees to be added\n tree_ids (optional): Tree ids to be assigned to the newly added trees. Default: Global max + [1, n]\n group_ids (optional): Group ids to be assigned to the newly added trees. Default: None\n names (optional): Names to be assigned to the newly added trees.\n colors (optional): Colors to be used for the new trees specified as (r, g, b, alpha). 
Default: (0, 0, 0, 1)\n \"\"\"\n if tree_ids is None:\n tree_id_start = self.max_tree_id() + 1\n tree_id_end = tree_id_start + len(nodes)\n tree_ids = list(range(tree_id_start, tree_id_end))\n if group_ids is None:\n group_ids = [None for x in range(len(nodes))]\n if names is None:\n names = ['' for x in range(len(nodes))]\n if colors is None:\n colors = [(0.0, 0.0, 0.0, 1.0) for x in range(len(nodes))]\n for node_idx, _ in nodes.iterrows():\n self.add_tree(nodes=nodes[node_idx:node_idx + 1], tree_id=\n tree_ids[node_idx], group_id=group_ids[node_idx], name=\n names[node_idx], color=colors[node_idx])\n\n def delete_tree(self, idx: int=None, id: int=None):\n \"\"\" Deletes tree with specified idx or id.\n\n Args:\n idx: Linear index of tree to be deleted\n id: Id of tree to be deleted\n\n \"\"\"\n if id is not None:\n idx = self.tree_ids.index(id)\n self.nodes.pop(idx)\n self.edges.pop(idx)\n self.names.pop(idx)\n self.colors.pop(idx)\n self.tree_ids.pop(idx)\n self.group_ids.pop(idx)\n\n def add_group(self, parent_id: int=None, id: int=None, name: str=None):\n \"\"\" Adds a new group to skeleton object.\n\n Args:\n parent_id: Parent group id to which new group is added as a child. Default: None (root group)\n id: Id of new group to be added. Default: Current max group id + 1\n name: Name of new group to be added. Default: 'Group {}'.format(id)\n\n Returns:\n id: Id of added group\n name: Name of added group\n\n \"\"\"\n if parent_id is not None:\n assert parent_id in self.group_ids, 'Parent id does not exist'\n if id is None:\n id = int(np.nanmax(np.asarray(self.group_ids, dtype=np.float)) + 1)\n else:\n assert id not in self.groups_ids(), 'Id already exists'\n if name is None:\n name = 'Group {}'.format(id)\n new_group = wknml.Group(id, name, [])\n if parent_id is None:\n self.groups.append(new_group)\n else:\n self.groups = Skeleton._group_append(self.groups, parent_id,\n new_group)\n return id, name\n\n def delete_group(self, id, target_id):\n pass\n\n def define_nodes(self, position_x: List[int], position_y: List[int],\n position_z: List[int], id: List[int]=None, radius: Optional[List[\n int]]=None, rotation_x: Optional[List[float]]=None, rotation_y:\n Optional[List[float]]=None, rotation_z: Optional[List[float]]=None,\n inVP: Optional[List[int]]=None, inMag: Optional[List[int]]=None,\n bitDepth: Optional[List[int]]=None, interpolation: Optional[List[\n bool]]=None, time: Optional[List[int]]=None, comment: Optional[List\n [int]]=None) ->Nodes:\n \"\"\" Generates new nodes table from data.\n\n Args:\n position_x: Node position x\n position_y: Node position y\n position_z: Node position z\n id (optional): (Globally unique) Node id. 
Default: New unique ids are generated\n radius (optional): Node radius\n rotation_x (optional): Node rotation x\n rotation_y (optional): Node rotation y\n rotation_z (optional): Node rotation z\n inVP (optional): Viewport index in which node was placed\n inMag (optional): (De-)Magnification factor in which node was placed\n bitDepth (optional): Bit (Color) Depth in which node was placed\n interpolation (optional): Interpolation state in which node was placed\n time (optional): Time stamp at which node was placed\n comment (optional): Comment associated with node\n\n Returns:\n nodes: Nodes object\n\n \"\"\"\n if id is None:\n id_max = self.max_node_id()\n id = list(range(id_max + 1, id_max + len(position_x) + 1))\n nodes = Nodes.from_list(id, position_x, position_y, position_z,\n radius, rotation_x, rotation_y, rotation_z, inVP, inMag,\n bitDepth, interpolation, time, comment)\n return nodes\n\n def define_nodes_from_positions(self, positions: np.ndarray) ->Nodes:\n \"\"\" Generates new nodes table from positions only (node ids are generated automatically).\n\n Args:\n positions (N x 3): Numpy array holding the (x,y,z) positions to be returned as nodes in a Nodes table\n\n Returns:\n nodes: Nodes object\n\n \"\"\"\n id_max = self.max_node_id()\n id = np.array(range(id_max + 1, id_max + positions.shape[0] + 1)\n ).reshape(-1, 1)\n nodes = Nodes.from_numpy(np.append(id, positions, axis=1))\n return nodes\n\n def get_distances_to_node(self, positions: Union[Sequence[Tuple[int,\n int, int]], np.ndarray], node_id: int=None, tree_idx: int=None,\n node_idx: int=None, unit: str='um') ->List[np.ndarray]:\n \"\"\" Get the (euclidean) distances from the specified node to the provided (x,y,z) positions\n\n Args:\n positions (N x 3): Target (x,y,z) positions to which the distances should be computed\n node_id: Node id of the node for which the distances should be computed\n tree_idx: Tree idx of the node for which the distances should be computed\n node_idx: Node idx of the node for which the distances should be computed\n unit (optional): Unit flag specifying in which unit the distances should be returned.\n Options: 'vx' (voxels), 'nm' (nanometer), 'um' (micrometer). Default: 'um' (micrometer)\n\n Returns:\n distances: Array holding distances\n\n \"\"\"\n assert (node_id is not None) ^ (tree_idx is not None) & (node_idx\n is not None\n ), 'Either provide node_id or both tree_idx and node_idx'\n if type(positions) is not np.ndarray:\n positions = np.array(positions)\n if node_id is not None:\n node_idx, tree_idx = self.node_id_to_idx(node_id)\n unit_factor = self._get_unit_factor(unit)\n distances = Skeleton.get_distance(positions, np.array(self.nodes[\n tree_idx].position.values[node_idx]), unit_factor)\n return distances\n\n def get_distance_to_nodes(self, position: Union[Tuple[int, int, int],\n np.ndarray], tree_idx: int, unit: str='um') ->List[np.ndarray]:\n \"\"\" Get the (euclidean) distances from the nodes of the specified tree to the provided (x,y,z) position\n\n Args:\n position (1 x 3): Target (x,y,z) position to which the node distances should be computed\n tree_idx: Tree idx for which node distances should be computed\n unit (optional): Unit flag specifying in which unit the distances should be returned.\n Options: 'vx' (voxels), 'nm' (nanometer), 'um' (micrometer). 
Default: 'um' (micrometer)\n\n Returns:\n distances: Array holding distances\n\n \"\"\"\n if type(position) is not np.ndarray:\n position = np.array(position)\n unit_factor = self._get_unit_factor(unit)\n distances = Skeleton.get_distance(np.array(self.nodes[tree_idx].\n position.values), position, unit_factor)\n return distances\n\n def get_graph(self, tree_idx):\n \"\"\" Returns the networkx graph representation of a tree.\n\n Args:\n tree_idx: Linear index of the tree to be returned as graph object\n\n Returns:\n graph: Graph object\n\n \"\"\"\n nodes = self.nodes[tree_idx]\n edges = self.edges[tree_idx]\n graph = Skeleton._get_graph(nodes, edges)\n return graph\n\n def get_shortest_path(self, node_id_start: int, node_id_end: int) ->List[\n int]:\n \"\"\" Returns the shortest path between two nodes of a tree.\n\n Args:\n node_id_start: Node id of start node\n node_id_end: Node id of end node\n\n Returns:\n shortest_path: Node indices comprising the shortest path\n\n \"\"\"\n _, tree_idx_start = self.node_id_to_idx(node_id_start)\n _, tree_idx_end = self.node_id_to_idx(node_id_end)\n assert tree_idx_start == tree_idx_end, 'Provided node ids need to be part of the same tree'\n graph = self.get_graph(tree_idx_start)\n shortest_path = nx.shortest_path(graph, node_id_start, node_id_end)\n return shortest_path\n\n def plot(self, tree_inds: Union[int, List[int]]=None, view: str=None,\n colors: Union[Tuple[float, float, float, float], List[Tuple[float,\n float, float, float]], str]=None, unit: str='um', show: bool=True,\n ax: plt.axes=None):\n \"\"\" Generates a (3D) line plot of the trees contained in the skeleton object.\n\n Args:\n tree_inds (optional): Tree indices to be plotted.\n Default: All trees are plotted\n view (optional): Plot as 2D projection on orthonormal plane.\n Options: 'xy', 'xz', 'yz'\n Default: Plot as 3D projection\n colors (optional): Colors in which trees should be plotted. If only one RGBA tuple is specified, it is\n broadcasted over all trees. Alternatively, a list providing RGBA tuples for each tree can be passed.\n Lastly, the name of a mnatplotlib colormap (https://matplotlib.org/tutorials/colors/colormaps.html) can\n be passed as a str.\n Default: Skeleton colors (self.colors) are used\n unit (optional): Specifies in which unit the plot should be generated.\n Options: 'vx' (voxels), 'nm' (nanometer), 'um' (micrometer).\n Default: 'um' (micrometer)\n show (optional): Displays the plot in an interactive window. For repeatedly plotting on the same axes, set\n to False. 
Default: True\n ax: Axes to be plotted on.\n\n Returns:\n ax: Axes which was plotted on\n \"\"\"\n if tree_inds is None:\n tree_inds = list(range(len(self.nodes)))\n elif tree_inds is int:\n tree_inds = [tree_inds]\n if colors is None:\n colors = self.colors\n elif type(colors) is str:\n cmap = cm.get_cmap(colors)\n colors = [cmap(x) for x in np.linspace(0, 1, self.num_trees())]\n elif type(colors[0]) is not Sequence:\n colors = [colors] * self.num_trees()\n unit_factor = self._get_unit_factor(unit)\n allowed_views = ['xy', 'xz', 'yz']\n if view is not None:\n assert view in allowed_views, 'The passed view argument: {} is not among the allowed views: {}'.format(\n view, allowed_views)\n if ax is None:\n fig = plt.figure()\n if view is None:\n ax = fig.add_subplot(111, projection='3d')\n else:\n ax = fig.add_subplot(111, projection='rectilinear')\n elif view is None:\n assert ax.name == '3d', 'To generate a 3D skeleton plot, the projection type of the passed axes must be 3D'\n else:\n assert ax.name != '3d', 'To generate a 2D skeleton plot, the projection type of the passed axes must be rectilinear'\n lims_min = []\n lims_max = []\n for tree_idx in tree_inds:\n edges = self.edges[tree_idx].copy()\n nodes = self.nodes[tree_idx].copy()\n if len(nodes) > 0:\n nodes['position'] = nodes['position'].multiply(unit_factor)\n if view == 'xy':\n nodes = nodes.drop([('position', 'z')], axis=1)\n elif view == 'xz':\n nodes = nodes.drop([('position', 'y')], axis=1)\n elif view == 'yz':\n nodes = nodes.drop([('position', 'x')], axis=1)\n lims_min.append(np.min(nodes['position'].values, axis=0))\n lims_max.append(np.max(nodes['position'].values, axis=0))\n segments = []\n for edge in edges:\n n0 = nodes['position'][nodes.id == edge[0]].values[0]\n n1 = nodes['position'][nodes.id == edge[1]].values[0]\n segment = [[c for c in n0], [c for c in n1]]\n segments.append(segment)\n if view is None:\n line_collection = art3d.Line3DCollection(segments=\n segments, colors=colors[tree_idx])\n ax.add_collection3d(line_collection)\n else:\n line_collection = LineCollection(segments=segments,\n colors=colors[tree_idx])\n ax.add_collection(line_collection)\n lim_min = np.min(np.array(lims_min), axis=0)\n lim_max = np.max(np.array(lims_max), axis=0)\n ax.set_xlim(lim_min[0], lim_max[0])\n ax.set_ylim(lim_min[1], lim_max[1])\n if view is None:\n ax.set_zlim(lim_min[2], lim_max[2])\n else:\n ax.set_aspect('equal')\n if show:\n plt.show()\n return ax\n\n def write_nml(self, nml_write_path):\n \"\"\" Writes the present state of the skeleton object to a .nml file.\n\n Args:\n nml_write_path: Path to which .nml file should be written\n\n \"\"\"\n if self.num_trees() == 0:\n self.add_tree()\n nml = self._skeleton_to_nml()\n with open(nml_write_path, 'wb') as f:\n wknml.write_nml(f, nml)\n\n def node_id_to_idx(self, node_id: int) ->(int, int):\n \"\"\" Returns the linear tree and node indices for the provided node id.\"\"\"\n node_idx = None\n for tree_idx, nodes in enumerate(self.nodes):\n index_list = nodes[nodes['id'] == node_id].index.tolist()\n if index_list:\n node_idx = index_list[0]\n break\n assert node_idx is not None, 'node id {} does not exist'.format(node_id\n )\n return node_idx, tree_idx\n\n def node_idx_to_id(self, node_idx: int, tree_idx: int) ->int:\n \"\"\" Returns the node id for the provided tree and node idx.\"\"\"\n node_id = self.nodes[tree_idx].loc[node_idx, 'id'].values[0]\n return node_id\n\n def min_group_id(self) ->int:\n \"\"\" Returns lowest group id. 
If no groups are defined, return None\"\"\"\n group_ids = np.asarray(self.group_ids, dtype=np.float)\n if np.all(np.isnan(group_ids)):\n group_id = None\n else:\n group_id = int(np.nanmin(group_ids))\n return group_id\n\n def max_group_id(self) ->int:\n \"\"\" Returns highest group id. If no groups are defined, return None\"\"\"\n group_ids = np.asarray(self.group_ids, dtype=np.float)\n if np.all(np.isnan(group_ids)):\n group_id = None\n else:\n group_id = int(np.nanmax(group_ids))\n return group_id\n\n def min_node_id(self) ->int:\n \"\"\" Returns lowest global node id.\"\"\"\n if len(self.nodes) > 0:\n min_node_id = min([(min(nodes.id) if len(nodes) > 0 else 0) for\n nodes in self.nodes])\n else:\n min_node_id = 0\n return min_node_id\n\n def max_node_id(self) ->int:\n \"\"\" Returns highest global node id.\"\"\"\n if len(self.nodes) > 0:\n max_node_id = max([(max(nodes.id) if len(nodes) > 0 else 0) for\n nodes in self.nodes])\n else:\n max_node_id = 0\n return max_node_id\n\n def min_tree_id(self) ->int:\n \"\"\" Returns lowest global tree id.\"\"\"\n return min(self.tree_ids) if len(self.tree_ids) > 0 else 0\n\n def max_tree_id(self) ->int:\n \"\"\" Returns highest global tree id.\"\"\"\n return max(self.tree_ids) if len(self.tree_ids) > 0 else 0\n\n def num_trees(self) ->int:\n \"\"\"Returns number of trees contained in skeleton object.\"\"\"\n return len(self.nodes)\n\n def groups_ids(self) ->List[int]:\n \"\"\" Returns all ids defined in groups tree\"\"\"\n _, groups_ids = Skeleton._group_get_ids(self.groups)\n return groups_ids\n\n def _get_unit_factor(self, unit: str) ->np.ndarray:\n \"\"\" Returns factor for unit conversion\n\n Args:\n unit: Unit for which to return the conversion factor.\n Options: 'vx' (voxels), 'nm' (nanometer), 'um' (micrometer)\n\n Returns:\n unit_factor (shape=(3,)): Unit conversion factors\n \"\"\"\n unit_factors = {'vx': np.array((1, 1, 1)), 'nm': np.array(self.\n parameters.scale), 'um': np.array(self.parameters.scale) / 1000}\n assert unit in unit_factors.keys(), 'Invalid unit'\n unit_factor = unit_factors[unit]\n return unit_factor\n\n def _reset_node_ids(self, start_id: int):\n \"\"\" Resets node ids of skeleton to begin with start value.\n\n Args:\n start_id: Start value to which the lowest node id should be set.\n \"\"\"\n add_id = start_id - self.min_node_id()\n for tree_idx, _ in enumerate(self.nodes):\n self.nodes[tree_idx].nodes['id'] += add_id\n self.edges[tree_idx] += add_id\n\n def _reset_tree_ids(self, start_id: int):\n \"\"\" Resets tree ids of skeleton to begin with start value.\n\n Args:\n start_id: Start value to which the lowest tree id should be set.\n \"\"\"\n add_id = start_id - self.min_tree_id()\n self.tree_ids = [(tree_id + add_id) for tree_id in self.tree_ids]\n\n def _reset_group_ids(self, start_id: int):\n \"\"\" Resets group ids of skeleton to begin with start value.\n\n Args:\n start_id: Start value to which the lowest group id should be set.\n \"\"\"\n min_group_id = self.min_group_id()\n if min_group_id is not None:\n add_id = start_id - min_group_id\n self.group_ids = [(i + add_id if i is not None else i) for i in\n self.group_ids]\n self.groups = [Skeleton._group_modify_id(group, id_modifier=lambda\n x: x + add_id) for group in self.groups]\n <mask token>\n\n def _nml_to_skeleton(self, nml):\n \"\"\" Converts wknml to skeleton data structures.\"\"\"\n self.groups = nml.groups\n self.branchpoints = nml.branchpoints\n self.parameters = Parameters(**nml.parameters._asdict())\n for tree in nml.trees:\n 
self.add_tree(nodes=Skeleton._nml_nodes_to_nodes(nml_nodes=tree\n .nodes, nml_comments=nml.comments), edges=np.array([(edge.\n source, edge.target) for edge in tree.edges]), group_id=\n tree.groupId, name=tree.name, color=tree.color)\n\n def _skeleton_to_nml(self):\n \"\"\" Converts skeleton to wknml data structures.\"\"\"\n trees = []\n for tree_idx, tree_id in enumerate(self.tree_ids):\n nml_nodes = Skeleton._nodes_to_nml_nodes(self.nodes[tree_idx])\n nml_edges = Skeleton._edges_to_nml_edges(self.edges[tree_idx])\n tree = wknml.Tree(id=tree_id, color=self.colors[tree_idx], name\n =self.names[tree_idx], groupId=self.group_ids[tree_idx],\n nodes=nml_nodes, edges=nml_edges)\n trees.append(tree)\n nml = wknml.NML(parameters=wknml.NMLParameters(**self.parameters.\n _asdict()), trees=trees, branchpoints=self.branchpoints,\n comments=self._skeleton_to_nml_comments(), groups=self.groups)\n return nml\n\n def _skeleton_to_nml_comments(self):\n \"\"\" Converts skeleton to wknml comments.\"\"\"\n nml_comments = []\n for nodes in self.nodes:\n comment_nodes = nodes[nodes['comment'].notnull()]\n for _, row in comment_nodes.iterrows():\n nml_comment = wknml.Comment(node=row['id'].values[0],\n content=row['comment'].values[0])\n nml_comments.append(nml_comment)\n return nml_comments\n\n @staticmethod\n def define_parameters(name: str, scale: Tuple[float, float, float],\n offset: Tuple[float, float, float]=(0, 0, 0), time: int=0,\n editPosition: Tuple[float, float, float]=(1.0, 1.0, 1.0),\n editRotation: Tuple[float, float, float]=(0.0, 0.0, 0.0), zoomLevel:\n float=1.0, taskBoundingBox: Tuple[int, int, int, int, int, int]=\n None, userBoundingBox: Tuple[int, int, int, int, int, int]=None\n ) ->Parameters:\n parameters = Parameters(name=name, scale=scale, offset=offset, time\n =time, editPosition=editPosition, editRotation=editRotation,\n zoomLevel=zoomLevel, taskBoundingBox=taskBoundingBox,\n userBoundingBox=userBoundingBox)\n return parameters\n\n @staticmethod\n def get_distance(positions: np.ndarray, position: np.ndarray,\n unit_factor: np.ndarray=None):\n \"\"\" Get the (euclidean) distances between positions and a target position\n\n Args:\n positions (N x 3): Array holding (multiple) x, y, z positions\n position (1 x 3): Array holding x, y, z position to which the distances should be computed\n unit_factors (1 x 3 Array, optional): Conversion factors with which distances are multiplied. 
Default (1,1,1)\n\n Returns:\n distances: Arrays holding distances\n\n \"\"\"\n if unit_factor is None:\n unit_factor = np.array([1, 1, 1])\n distances = np.sqrt(np.sum(((positions - position) * unit_factor.\n reshape(1, 3)) ** 2, axis=1))\n return distances\n\n @staticmethod\n def _nml_nodes_to_nodes(nml_nodes, nml_comments):\n \"\"\" Converts wknml nodes (list of named tuples) to skeleton nodes (DataFrame subclass).\"\"\"\n data = [(node.id, node.position[0], node.position[1], node.position\n [2], node.radius, node.rotation[0], node.rotation[1], node.\n rotation[2], node.inVp, node.inMag, node.bitDepth, node.\n interpolation, node.time, np.nan) for node in nml_nodes]\n nodes = Nodes(data=data)\n comment_node_ids = [comment.node for comment in nml_comments]\n comment_strings = [comment.content for comment in nml_comments]\n nodes_ids_comments = nodes.id[nodes.id.isin(comment_node_ids)]\n for id in nodes_ids_comments:\n id_comment = comment_strings[comment_node_ids.index(id)]\n nodes.loc[nodes.id == id, ('comment', '')] = id_comment\n return nodes\n\n @staticmethod\n def _nodes_to_nml_nodes(nodes):\n \"\"\" Converts skeleton nodes (DataFrame subclass) to wknml nodes (list of named tuples).\"\"\"\n nml_nodes = []\n for idx, row in nodes.iterrows():\n nml_node = wknml.Node(id=int(row.id), position=tuple(row.\n position.values), radius=float(row.radius), rotation=tuple(\n row.rotation.values), inVp=int(row.inVp), inMag=int(row.\n inMag), bitDepth=int(row.bitDepth), interpolation=bool(row.\n interpolation.values), time=int(row.time))\n nml_nodes.append(nml_node)\n return nml_nodes\n <mask token>\n\n @staticmethod\n def _group_append(groups, id, new_group):\n \"\"\" Appends new group as a child of existing group with specified id. Currently only works up to depth=3.\"\"\"\n path_inds = []\n _, _, idx = Skeleton._group_parent(groups, id)\n while id is not None:\n path_inds.append(idx)\n id, idx, _ = Skeleton._group_parent(groups, id)\n path_inds = list(reversed(path_inds))\n if len(path_inds) == 1:\n groups[path_inds[0]]._replace(children=new_group)\n elif len(path_inds) == 2:\n groups[path_inds[0]].children[path_inds[1]]._replace(children=\n new_group)\n elif len(path_inds) == 3:\n groups[path_inds[0]].children[path_inds[1]].children[path_inds[2]\n ]._replace(children=new_group)\n return groups\n\n @staticmethod\n def _group_parent(groups, id, parent_id=None, parent_idx=None,\n child_idx=None):\n \"\"\" Returns the id of the parent group for a (child) group with specified id.\"\"\"\n for group in groups:\n if id in [x.id for x in group.children]:\n parent_id = group.id\n parent_idx = groups.index(group)\n child_idx = [x.id for x in group.children].index(id)\n else:\n parent_id, parent_idx, child_idx = Skeleton._group_parent(group\n .children, id, parent_id, parent_idx, child_idx)\n return parent_id, parent_idx, child_idx\n\n @staticmethod\n def _group_modify_id(group, id_modifier):\n \"\"\" Modifies group ids with the passed id_modifier (e.g. 
lambda) function.\"\"\"\n group = group._replace(id=id_modifier(group.id))\n group = group._replace(children=list(map(lambda g: Skeleton.\n _group_modify_id(g, id_modifier), group.children)))\n return group\n\n @staticmethod\n def _group_get_ids(groups, ids=[]):\n for group in groups:\n ids.append(group.id)\n Skeleton._group_get_ids(group.children, ids)\n return groups, ids\n\n @staticmethod\n def _get_graph(nodes: Nodes, edges: np.ndarray):\n \"\"\" Returns the networkx graph representation of provided nodes and edges.\"\"\"\n graph = nx.Graph()\n graph.add_nodes_from(nodes['id'])\n attrs = nodes.set_index('id').to_dict('index')\n nx.set_node_attributes(graph, attrs)\n graph.add_edges_from(edges)\n return graph\n\n @staticmethod\n def _num_conn_comp(graph):\n \"\"\" Returns number of connected components for graph\"\"\"\n return nx.number_connected_components(graph)\n",
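Taken together, the public methods above suggest a simple round trip: define parameters, generate nodes from raw positions, add them as a tree, query distances, and write the result back to an .nml file. The sketch below is assembled from the docstrings in this class; it assumes Skeleton is importable from the wkskel package top level and has not been verified against webKnossos.

import numpy as np
from wkskel import Skeleton  # assumption: the package re-exports Skeleton at the top level

# Bare skeleton from parameters (name and scale as in the constructor docstring example)
parameters = Skeleton.define_parameters(name="2017-01-12_FD0156-2", scale=(11.24, 11.24, 32))
skel = Skeleton(parameters=parameters)

# Three nodes along x; node ids are generated automatically starting at max_node_id() + 1
positions = np.array([[100, 200, 50], [110, 200, 50], [120, 200, 50]])
nodes = skel.define_nodes_from_positions(positions)

# Edges reference the generated node ids (1, 2, 3 for an empty skeleton);
# strict mode requires the tree to form a single connected component
skel.add_tree(nodes=nodes, edges=[(1, 2), (2, 3)], name='example tree')

# Distances from all nodes of tree 0 to a target position, in micrometer
print(skel.get_distance_to_nodes((100, 200, 50), tree_idx=0, unit='um'))

skel.write_nml('example_out.nml')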
"step-4": "<mask token>\n\n\nclass Skeleton:\n <mask token>\n <mask token>\n\n def __init__(self, nml_path: str=None, parameters: Parameters=None,\n strict=True):\n \"\"\" The Skeleton constructor expects either a path to a nml file or a Parameters object as input arguments\n\n Args:\n nml_path: Path to nml file. If constructed via an nml file, the skeleton object is populated with all the\n trees and additional properties specified in the .nml file\n parameters (optional): Parameters (wkskel.types.Parameters) specifying the most rudimentary properties\n of the skeleton.\n strict (optional): Controls assertions ensuring that resulting skeleton objects are compatible with\n webKnossos. Default: True\n\n Examples:\n Using nml_path:\n nml_path = '/path/to/example.nml'\n skel = Skeleton(nml_path)\n\n Using parameters:\n parameters = Skeleton.define_parameters(name=\"2017-01-12_FD0156-2\", scale=(11.24, 11.24, 32))\n skel = Skeleton(parameters=parameters)\n \"\"\"\n assert (nml_path is not None) ^ (parameters is not None\n ), 'To construct a skeleton object, either a path to a nml file or the skeleton parameters need to passed'\n self.nodes = list()\n self.edges = list()\n self.names = list()\n self.colors = list()\n self.tree_ids = list()\n self.group_ids = list()\n self.groups = list()\n self.branchpoints = list()\n self.parameters = Parameters()\n self.nml_path = str()\n self.strict = strict\n self.defaults = self.DEFAULTS\n if nml_path is not None:\n assert os.path.exists(nml_path), 'not a valid path: {}'.format(\n nml_path)\n try:\n with open(nml_path, 'rb') as f:\n nml = wknml.parse_nml(f)\n except IOError:\n print('not a valid nml file: {}'.format(nml_path))\n self._nml_to_skeleton(nml)\n else:\n assert type(parameters\n ) is Parameters, 'provided parameters must be of type wkskel.types.Parameters'\n self._parameters_to_skeleton(parameters)\n\n def add_tree(self, nodes: Nodes=Nodes(), edges: Union[List[Tuple[int,\n int]], np.ndarray]=None, tree_id: int=None, name: str='', group_id:\n int=None, color: Tuple[float, float, float, float]=None):\n \"\"\" Appends new tree to skeleton.\n\n Args:\n nodes (optional): Nodes representing tree to be added\n edges (optional): Edges representing tree to be added\n tree_id (optional): Tree id to be used for new tree. Default: Highest current tree id + 1\n name (optional): Name to be used for new tree. Default: Empty str\n group_id (optional): Group id to be used for new tree. If passed group id does not exist, it is created.\n Default: None\n color (optional): Color to be used for new tree specified as (r, g, b, alpha). 
Default: (0, 0, 0, 1)\n \"\"\"\n if edges is None:\n edges = np.empty((0, 2), dtype=np.uint32)\n elif type(edges) is list:\n edges = np.asarray(edges)\n if self.strict & (len(nodes) > 1):\n assert Skeleton._num_conn_comp(Skeleton._get_graph(nodes, edges)\n ) == 1, 'Added tree consists of more than one connected component'\n if tree_id is None:\n tree_id = self.max_tree_id() + 1\n if (group_id is not None) & (group_id not in self.groups_ids()):\n self.add_group(id=group_id)\n if color is None:\n color = self.defaults['tree']['color']\n self.nodes.append(nodes)\n self.edges.append(edges)\n self.tree_ids.append(tree_id)\n self.group_ids.append(group_id)\n self.names.append(name)\n self.colors.append(color)\n\n def add_tree_from_skel(self, skel: 'Skeleton', tree_idx: int, group_id:\n int=None, name: str=None):\n \"\"\" Appends a specific tree contained in a different skeleton object to the skeleton.\n\n Args:\n skel: Source skeleton object (different from the one calling this method) to be added\n tree_idx: Source tree index of tree to be added\n group_id (optional): Target group id to which the added tree should be assigned. Default: None\n name (optional): Target name for the added tree\n \"\"\"\n if group_id not in self.groups_ids():\n self.add_group(id=group_id)\n if name is None:\n name = skel.names[tree_idx]\n skel._reset_node_ids(self.max_node_id() + 1)\n skel._reset_tree_ids(self.max_tree_id() + 1)\n self.nodes = self.nodes + [skel.nodes[tree_idx]]\n self.edges = self.edges + [skel.edges[tree_idx]]\n self.tree_ids = self.tree_ids + [skel.tree_ids[tree_idx]]\n self.group_ids = self.group_ids + [group_id]\n self.names = self.names + [name]\n self.colors = self.colors + [skel.colors[tree_idx]]\n return self\n\n def add_trees_from_skel(self, skel: 'Skeleton'):\n \"\"\" Appends all trees contained in a different skeleton object to the skeleton.\n\n This method attempts to preserve the relative group structure found in the skeleton object to be added\n\n Args:\n skel: Source skeleton object (different from the one calling this method) to be added\n \"\"\"\n skel._reset_node_ids(self.max_node_id() + 1)\n skel._reset_tree_ids(self.max_tree_id() + 1)\n max_group_id = self.max_group_id()\n if max_group_id is not None:\n skel._reset_group_ids(max_group_id + 1)\n self.nodes = self.nodes + skel.nodes\n self.edges = self.edges + skel.edges\n self.tree_ids = self.tree_ids + skel.tree_ids\n self.group_ids = self.group_ids + skel.group_ids\n self.groups = self.groups + skel.groups\n self.names = self.names + skel.names\n self.colors = self.colors + skel.colors\n return self\n\n def add_nodes_as_trees(self, nodes: Nodes, tree_ids: List[int]=None,\n group_ids: List[int]=None, names: List[str]=None, colors: List[\n Tuple[float, float, float, float]]=None):\n \"\"\" Appends each of the specified nodes as separate trees to the skeleton (1 node each).\n\n Args:\n nodes: Nodes representing the trees to be added\n tree_ids (optional): Tree ids to be assigned to the newly added trees. Default: Global max + [1, n]\n group_ids (optional): Group ids to be assigned to the newly added trees. Default: None\n names (optional): Names to be assigned to the newly added trees.\n colors (optional): Colors to be used for the new trees specified as (r, g, b, alpha). 
Default: (0, 0, 0, 1)\n \"\"\"\n if tree_ids is None:\n tree_id_start = self.max_tree_id() + 1\n tree_id_end = tree_id_start + len(nodes)\n tree_ids = list(range(tree_id_start, tree_id_end))\n if group_ids is None:\n group_ids = [None for x in range(len(nodes))]\n if names is None:\n names = ['' for x in range(len(nodes))]\n if colors is None:\n colors = [(0.0, 0.0, 0.0, 1.0) for x in range(len(nodes))]\n for node_idx, _ in nodes.iterrows():\n self.add_tree(nodes=nodes[node_idx:node_idx + 1], tree_id=\n tree_ids[node_idx], group_id=group_ids[node_idx], name=\n names[node_idx], color=colors[node_idx])\n\n def delete_tree(self, idx: int=None, id: int=None):\n \"\"\" Deletes tree with specified idx or id.\n\n Args:\n idx: Linear index of tree to be deleted\n id: Id of tree to be deleted\n\n \"\"\"\n if id is not None:\n idx = self.tree_ids.index(id)\n self.nodes.pop(idx)\n self.edges.pop(idx)\n self.names.pop(idx)\n self.colors.pop(idx)\n self.tree_ids.pop(idx)\n self.group_ids.pop(idx)\n\n def add_group(self, parent_id: int=None, id: int=None, name: str=None):\n \"\"\" Adds a new group to skeleton object.\n\n Args:\n parent_id: Parent group id to which new group is added as a child. Default: None (root group)\n id: Id of new group to be added. Default: Current max group id + 1\n name: Name of new group to be added. Default: 'Group {}'.format(id)\n\n Returns:\n id: Id of added group\n name: Name of added group\n\n \"\"\"\n if parent_id is not None:\n assert parent_id in self.group_ids, 'Parent id does not exist'\n if id is None:\n id = int(np.nanmax(np.asarray(self.group_ids, dtype=np.float)) + 1)\n else:\n assert id not in self.groups_ids(), 'Id already exists'\n if name is None:\n name = 'Group {}'.format(id)\n new_group = wknml.Group(id, name, [])\n if parent_id is None:\n self.groups.append(new_group)\n else:\n self.groups = Skeleton._group_append(self.groups, parent_id,\n new_group)\n return id, name\n\n def delete_group(self, id, target_id):\n pass\n\n def define_nodes(self, position_x: List[int], position_y: List[int],\n position_z: List[int], id: List[int]=None, radius: Optional[List[\n int]]=None, rotation_x: Optional[List[float]]=None, rotation_y:\n Optional[List[float]]=None, rotation_z: Optional[List[float]]=None,\n inVP: Optional[List[int]]=None, inMag: Optional[List[int]]=None,\n bitDepth: Optional[List[int]]=None, interpolation: Optional[List[\n bool]]=None, time: Optional[List[int]]=None, comment: Optional[List\n [int]]=None) ->Nodes:\n \"\"\" Generates new nodes table from data.\n\n Args:\n position_x: Node position x\n position_y: Node position y\n position_z: Node position z\n id (optional): (Globally unique) Node id. 
Default: New unique ids are generated\n radius (optional): Node radius\n rotation_x (optional): Node rotation x\n rotation_y (optional): Node rotation y\n rotation_z (optional): Node rotation z\n inVP (optional): Viewport index in which node was placed\n inMag (optional): (De-)Magnification factor in which node was placed\n bitDepth (optional): Bit (Color) Depth in which node was placed\n interpolation (optional): Interpolation state in which node was placed\n time (optional): Time stamp at which node was placed\n comment (optional): Comment associated with node\n\n Returns:\n nodes: Nodes object\n\n \"\"\"\n if id is None:\n id_max = self.max_node_id()\n id = list(range(id_max + 1, id_max + len(position_x) + 1))\n nodes = Nodes.from_list(id, position_x, position_y, position_z,\n radius, rotation_x, rotation_y, rotation_z, inVP, inMag,\n bitDepth, interpolation, time, comment)\n return nodes\n\n def define_nodes_from_positions(self, positions: np.ndarray) ->Nodes:\n \"\"\" Generates new nodes table from positions only (node ids are generated automatically).\n\n Args:\n positions (N x 3): Numpy array holding the (x,y,z) positions to be returned as nodes in a Nodes table\n\n Returns:\n nodes: Nodes object\n\n \"\"\"\n id_max = self.max_node_id()\n id = np.array(range(id_max + 1, id_max + positions.shape[0] + 1)\n ).reshape(-1, 1)\n nodes = Nodes.from_numpy(np.append(id, positions, axis=1))\n return nodes\n\n def get_distances_to_node(self, positions: Union[Sequence[Tuple[int,\n int, int]], np.ndarray], node_id: int=None, tree_idx: int=None,\n node_idx: int=None, unit: str='um') ->List[np.ndarray]:\n \"\"\" Get the (euclidean) distances from the specified node to the provided (x,y,z) positions\n\n Args:\n positions (N x 3): Target (x,y,z) positions to which the distances should be computed\n node_id: Node id of the node for which the distances should be computed\n tree_idx: Tree idx of the node for which the distances should be computed\n node_idx: Node idx of the node for which the distances should be computed\n unit (optional): Unit flag specifying in which unit the distances should be returned.\n Options: 'vx' (voxels), 'nm' (nanometer), 'um' (micrometer). Default: 'um' (micrometer)\n\n Returns:\n distances: Array holding distances\n\n \"\"\"\n assert (node_id is not None) ^ (tree_idx is not None) & (node_idx\n is not None\n ), 'Either provide node_id or both tree_idx and node_idx'\n if type(positions) is not np.ndarray:\n positions = np.array(positions)\n if node_id is not None:\n node_idx, tree_idx = self.node_id_to_idx(node_id)\n unit_factor = self._get_unit_factor(unit)\n distances = Skeleton.get_distance(positions, np.array(self.nodes[\n tree_idx].position.values[node_idx]), unit_factor)\n return distances\n\n def get_distance_to_nodes(self, position: Union[Tuple[int, int, int],\n np.ndarray], tree_idx: int, unit: str='um') ->List[np.ndarray]:\n \"\"\" Get the (euclidean) distances from the nodes of the specified tree to the provided (x,y,z) position\n\n Args:\n position (1 x 3): Target (x,y,z) position to which the node distances should be computed\n tree_idx: Tree idx for which node distances should be computed\n unit (optional): Unit flag specifying in which unit the distances should be returned.\n Options: 'vx' (voxels), 'nm' (nanometer), 'um' (micrometer). 
Default: 'um' (micrometer)\n\n Returns:\n distances: Array holding distances\n\n \"\"\"\n if type(position) is not np.ndarray:\n position = np.array(position)\n unit_factor = self._get_unit_factor(unit)\n distances = Skeleton.get_distance(np.array(self.nodes[tree_idx].\n position.values), position, unit_factor)\n return distances\n\n def get_graph(self, tree_idx):\n \"\"\" Returns the networkx graph representation of a tree.\n\n Args:\n tree_idx: Linear index of the tree to be returned as graph object\n\n Returns:\n graph: Graph object\n\n \"\"\"\n nodes = self.nodes[tree_idx]\n edges = self.edges[tree_idx]\n graph = Skeleton._get_graph(nodes, edges)\n return graph\n\n def get_shortest_path(self, node_id_start: int, node_id_end: int) ->List[\n int]:\n \"\"\" Returns the shortest path between two nodes of a tree.\n\n Args:\n node_id_start: Node id of start node\n node_id_end: Node id of end node\n\n Returns:\n shortest_path: Node indices comprising the shortest path\n\n \"\"\"\n _, tree_idx_start = self.node_id_to_idx(node_id_start)\n _, tree_idx_end = self.node_id_to_idx(node_id_end)\n assert tree_idx_start == tree_idx_end, 'Provided node ids need to be part of the same tree'\n graph = self.get_graph(tree_idx_start)\n shortest_path = nx.shortest_path(graph, node_id_start, node_id_end)\n return shortest_path\n\n def plot(self, tree_inds: Union[int, List[int]]=None, view: str=None,\n colors: Union[Tuple[float, float, float, float], List[Tuple[float,\n float, float, float]], str]=None, unit: str='um', show: bool=True,\n ax: plt.axes=None):\n \"\"\" Generates a (3D) line plot of the trees contained in the skeleton object.\n\n Args:\n tree_inds (optional): Tree indices to be plotted.\n Default: All trees are plotted\n view (optional): Plot as 2D projection on orthonormal plane.\n Options: 'xy', 'xz', 'yz'\n Default: Plot as 3D projection\n colors (optional): Colors in which trees should be plotted. If only one RGBA tuple is specified, it is\n broadcasted over all trees. Alternatively, a list providing RGBA tuples for each tree can be passed.\n Lastly, the name of a mnatplotlib colormap (https://matplotlib.org/tutorials/colors/colormaps.html) can\n be passed as a str.\n Default: Skeleton colors (self.colors) are used\n unit (optional): Specifies in which unit the plot should be generated.\n Options: 'vx' (voxels), 'nm' (nanometer), 'um' (micrometer).\n Default: 'um' (micrometer)\n show (optional): Displays the plot in an interactive window. For repeatedly plotting on the same axes, set\n to False. 
Default: True\n ax: Axes to be plotted on.\n\n Returns:\n ax: Axes which was plotted on\n \"\"\"\n if tree_inds is None:\n tree_inds = list(range(len(self.nodes)))\n elif tree_inds is int:\n tree_inds = [tree_inds]\n if colors is None:\n colors = self.colors\n elif type(colors) is str:\n cmap = cm.get_cmap(colors)\n colors = [cmap(x) for x in np.linspace(0, 1, self.num_trees())]\n elif type(colors[0]) is not Sequence:\n colors = [colors] * self.num_trees()\n unit_factor = self._get_unit_factor(unit)\n allowed_views = ['xy', 'xz', 'yz']\n if view is not None:\n assert view in allowed_views, 'The passed view argument: {} is not among the allowed views: {}'.format(\n view, allowed_views)\n if ax is None:\n fig = plt.figure()\n if view is None:\n ax = fig.add_subplot(111, projection='3d')\n else:\n ax = fig.add_subplot(111, projection='rectilinear')\n elif view is None:\n assert ax.name == '3d', 'To generate a 3D skeleton plot, the projection type of the passed axes must be 3D'\n else:\n assert ax.name != '3d', 'To generate a 2D skeleton plot, the projection type of the passed axes must be rectilinear'\n lims_min = []\n lims_max = []\n for tree_idx in tree_inds:\n edges = self.edges[tree_idx].copy()\n nodes = self.nodes[tree_idx].copy()\n if len(nodes) > 0:\n nodes['position'] = nodes['position'].multiply(unit_factor)\n if view == 'xy':\n nodes = nodes.drop([('position', 'z')], axis=1)\n elif view == 'xz':\n nodes = nodes.drop([('position', 'y')], axis=1)\n elif view == 'yz':\n nodes = nodes.drop([('position', 'x')], axis=1)\n lims_min.append(np.min(nodes['position'].values, axis=0))\n lims_max.append(np.max(nodes['position'].values, axis=0))\n segments = []\n for edge in edges:\n n0 = nodes['position'][nodes.id == edge[0]].values[0]\n n1 = nodes['position'][nodes.id == edge[1]].values[0]\n segment = [[c for c in n0], [c for c in n1]]\n segments.append(segment)\n if view is None:\n line_collection = art3d.Line3DCollection(segments=\n segments, colors=colors[tree_idx])\n ax.add_collection3d(line_collection)\n else:\n line_collection = LineCollection(segments=segments,\n colors=colors[tree_idx])\n ax.add_collection(line_collection)\n lim_min = np.min(np.array(lims_min), axis=0)\n lim_max = np.max(np.array(lims_max), axis=0)\n ax.set_xlim(lim_min[0], lim_max[0])\n ax.set_ylim(lim_min[1], lim_max[1])\n if view is None:\n ax.set_zlim(lim_min[2], lim_max[2])\n else:\n ax.set_aspect('equal')\n if show:\n plt.show()\n return ax\n\n def write_nml(self, nml_write_path):\n \"\"\" Writes the present state of the skeleton object to a .nml file.\n\n Args:\n nml_write_path: Path to which .nml file should be written\n\n \"\"\"\n if self.num_trees() == 0:\n self.add_tree()\n nml = self._skeleton_to_nml()\n with open(nml_write_path, 'wb') as f:\n wknml.write_nml(f, nml)\n\n def node_id_to_idx(self, node_id: int) ->(int, int):\n \"\"\" Returns the linear tree and node indices for the provided node id.\"\"\"\n node_idx = None\n for tree_idx, nodes in enumerate(self.nodes):\n index_list = nodes[nodes['id'] == node_id].index.tolist()\n if index_list:\n node_idx = index_list[0]\n break\n assert node_idx is not None, 'node id {} does not exist'.format(node_id\n )\n return node_idx, tree_idx\n\n def node_idx_to_id(self, node_idx: int, tree_idx: int) ->int:\n \"\"\" Returns the node id for the provided tree and node idx.\"\"\"\n node_id = self.nodes[tree_idx].loc[node_idx, 'id'].values[0]\n return node_id\n\n def min_group_id(self) ->int:\n \"\"\" Returns lowest group id. 
If no groups are defined, return None\"\"\"\n group_ids = np.asarray(self.group_ids, dtype=np.float)\n if np.all(np.isnan(group_ids)):\n group_id = None\n else:\n group_id = int(np.nanmin(group_ids))\n return group_id\n\n def max_group_id(self) ->int:\n \"\"\" Returns highest group id. If no groups are defined, return None\"\"\"\n group_ids = np.asarray(self.group_ids, dtype=np.float)\n if np.all(np.isnan(group_ids)):\n group_id = None\n else:\n group_id = int(np.nanmax(group_ids))\n return group_id\n\n def min_node_id(self) ->int:\n \"\"\" Returns lowest global node id.\"\"\"\n if len(self.nodes) > 0:\n min_node_id = min([(min(nodes.id) if len(nodes) > 0 else 0) for\n nodes in self.nodes])\n else:\n min_node_id = 0\n return min_node_id\n\n def max_node_id(self) ->int:\n \"\"\" Returns highest global node id.\"\"\"\n if len(self.nodes) > 0:\n max_node_id = max([(max(nodes.id) if len(nodes) > 0 else 0) for\n nodes in self.nodes])\n else:\n max_node_id = 0\n return max_node_id\n\n def min_tree_id(self) ->int:\n \"\"\" Returns lowest global tree id.\"\"\"\n return min(self.tree_ids) if len(self.tree_ids) > 0 else 0\n\n def max_tree_id(self) ->int:\n \"\"\" Returns highest global tree id.\"\"\"\n return max(self.tree_ids) if len(self.tree_ids) > 0 else 0\n\n def num_trees(self) ->int:\n \"\"\"Returns number of trees contained in skeleton object.\"\"\"\n return len(self.nodes)\n\n def groups_ids(self) ->List[int]:\n \"\"\" Returns all ids defined in groups tree\"\"\"\n _, groups_ids = Skeleton._group_get_ids(self.groups)\n return groups_ids\n\n def _get_unit_factor(self, unit: str) ->np.ndarray:\n \"\"\" Returns factor for unit conversion\n\n Args:\n unit: Unit for which to return the conversion factor.\n Options: 'vx' (voxels), 'nm' (nanometer), 'um' (micrometer)\n\n Returns:\n unit_factor (shape=(3,)): Unit conversion factors\n \"\"\"\n unit_factors = {'vx': np.array((1, 1, 1)), 'nm': np.array(self.\n parameters.scale), 'um': np.array(self.parameters.scale) / 1000}\n assert unit in unit_factors.keys(), 'Invalid unit'\n unit_factor = unit_factors[unit]\n return unit_factor\n\n def _reset_node_ids(self, start_id: int):\n \"\"\" Resets node ids of skeleton to begin with start value.\n\n Args:\n start_id: Start value to which the lowest node id should be set.\n \"\"\"\n add_id = start_id - self.min_node_id()\n for tree_idx, _ in enumerate(self.nodes):\n self.nodes[tree_idx].nodes['id'] += add_id\n self.edges[tree_idx] += add_id\n\n def _reset_tree_ids(self, start_id: int):\n \"\"\" Resets tree ids of skeleton to begin with start value.\n\n Args:\n start_id: Start value to which the lowest tree id should be set.\n \"\"\"\n add_id = start_id - self.min_tree_id()\n self.tree_ids = [(tree_id + add_id) for tree_id in self.tree_ids]\n\n def _reset_group_ids(self, start_id: int):\n \"\"\" Resets group ids of skeleton to begin with start value.\n\n Args:\n start_id: Start value to which the lowest group id should be set.\n \"\"\"\n min_group_id = self.min_group_id()\n if min_group_id is not None:\n add_id = start_id - min_group_id\n self.group_ids = [(i + add_id if i is not None else i) for i in\n self.group_ids]\n self.groups = [Skeleton._group_modify_id(group, id_modifier=lambda\n x: x + add_id) for group in self.groups]\n\n def _parameters_to_skeleton(self, parameters):\n \"\"\" Generates bare skeleton object from parameters.\"\"\"\n self.parameters = parameters\n\n def _nml_to_skeleton(self, nml):\n \"\"\" Converts wknml to skeleton data structures.\"\"\"\n self.groups = nml.groups\n 
self.branchpoints = nml.branchpoints\n self.parameters = Parameters(**nml.parameters._asdict())\n for tree in nml.trees:\n self.add_tree(nodes=Skeleton._nml_nodes_to_nodes(nml_nodes=tree\n .nodes, nml_comments=nml.comments), edges=np.array([(edge.\n source, edge.target) for edge in tree.edges]), group_id=\n tree.groupId, name=tree.name, color=tree.color)\n\n def _skeleton_to_nml(self):\n \"\"\" Converts skeleton to wknml data structures.\"\"\"\n trees = []\n for tree_idx, tree_id in enumerate(self.tree_ids):\n nml_nodes = Skeleton._nodes_to_nml_nodes(self.nodes[tree_idx])\n nml_edges = Skeleton._edges_to_nml_edges(self.edges[tree_idx])\n tree = wknml.Tree(id=tree_id, color=self.colors[tree_idx], name\n =self.names[tree_idx], groupId=self.group_ids[tree_idx],\n nodes=nml_nodes, edges=nml_edges)\n trees.append(tree)\n nml = wknml.NML(parameters=wknml.NMLParameters(**self.parameters.\n _asdict()), trees=trees, branchpoints=self.branchpoints,\n comments=self._skeleton_to_nml_comments(), groups=self.groups)\n return nml\n\n def _skeleton_to_nml_comments(self):\n \"\"\" Converts skeleton to wknml comments.\"\"\"\n nml_comments = []\n for nodes in self.nodes:\n comment_nodes = nodes[nodes['comment'].notnull()]\n for _, row in comment_nodes.iterrows():\n nml_comment = wknml.Comment(node=row['id'].values[0],\n content=row['comment'].values[0])\n nml_comments.append(nml_comment)\n return nml_comments\n\n @staticmethod\n def define_parameters(name: str, scale: Tuple[float, float, float],\n offset: Tuple[float, float, float]=(0, 0, 0), time: int=0,\n editPosition: Tuple[float, float, float]=(1.0, 1.0, 1.0),\n editRotation: Tuple[float, float, float]=(0.0, 0.0, 0.0), zoomLevel:\n float=1.0, taskBoundingBox: Tuple[int, int, int, int, int, int]=\n None, userBoundingBox: Tuple[int, int, int, int, int, int]=None\n ) ->Parameters:\n parameters = Parameters(name=name, scale=scale, offset=offset, time\n =time, editPosition=editPosition, editRotation=editRotation,\n zoomLevel=zoomLevel, taskBoundingBox=taskBoundingBox,\n userBoundingBox=userBoundingBox)\n return parameters\n\n @staticmethod\n def get_distance(positions: np.ndarray, position: np.ndarray,\n unit_factor: np.ndarray=None):\n \"\"\" Get the (euclidean) distances between positions and a target position\n\n Args:\n positions (N x 3): Array holding (multiple) x, y, z positions\n position (1 x 3): Array holding x, y, z position to which the distances should be computed\n unit_factors (1 x 3 Array, optional): Conversion factors with which distances are multiplied. 
Default (1,1,1)\n\n Returns:\n distances: Arrays holding distances\n\n \"\"\"\n if unit_factor is None:\n unit_factor = np.array([1, 1, 1])\n distances = np.sqrt(np.sum(((positions - position) * unit_factor.\n reshape(1, 3)) ** 2, axis=1))\n return distances\n\n @staticmethod\n def _nml_nodes_to_nodes(nml_nodes, nml_comments):\n \"\"\" Converts wknml nodes (list of named tuples) to skeleton nodes (DataFrame subclass).\"\"\"\n data = [(node.id, node.position[0], node.position[1], node.position\n [2], node.radius, node.rotation[0], node.rotation[1], node.\n rotation[2], node.inVp, node.inMag, node.bitDepth, node.\n interpolation, node.time, np.nan) for node in nml_nodes]\n nodes = Nodes(data=data)\n comment_node_ids = [comment.node for comment in nml_comments]\n comment_strings = [comment.content for comment in nml_comments]\n nodes_ids_comments = nodes.id[nodes.id.isin(comment_node_ids)]\n for id in nodes_ids_comments:\n id_comment = comment_strings[comment_node_ids.index(id)]\n nodes.loc[nodes.id == id, ('comment', '')] = id_comment\n return nodes\n\n @staticmethod\n def _nodes_to_nml_nodes(nodes):\n \"\"\" Converts skeleton nodes (DataFrame subclass) to wknml nodes (list of named tuples).\"\"\"\n nml_nodes = []\n for idx, row in nodes.iterrows():\n nml_node = wknml.Node(id=int(row.id), position=tuple(row.\n position.values), radius=float(row.radius), rotation=tuple(\n row.rotation.values), inVp=int(row.inVp), inMag=int(row.\n inMag), bitDepth=int(row.bitDepth), interpolation=bool(row.\n interpolation.values), time=int(row.time))\n nml_nodes.append(nml_node)\n return nml_nodes\n\n @staticmethod\n def _edges_to_nml_edges(edges):\n \"\"\" Converts skeleton edges (numpy array) to wknml edges (list of named tuples).\"\"\"\n nml_edges = []\n for idx in range(edges.shape[0]):\n nml_edge = wknml.Edge(source=int(edges[idx, 0]), target=int(\n edges[idx, 1]))\n nml_edges.append(nml_edge)\n return nml_edges\n\n @staticmethod\n def _group_append(groups, id, new_group):\n \"\"\" Appends new group as a child of existing group with specified id. Currently only works up to depth=3.\"\"\"\n path_inds = []\n _, _, idx = Skeleton._group_parent(groups, id)\n while id is not None:\n path_inds.append(idx)\n id, idx, _ = Skeleton._group_parent(groups, id)\n path_inds = list(reversed(path_inds))\n if len(path_inds) == 1:\n groups[path_inds[0]]._replace(children=new_group)\n elif len(path_inds) == 2:\n groups[path_inds[0]].children[path_inds[1]]._replace(children=\n new_group)\n elif len(path_inds) == 3:\n groups[path_inds[0]].children[path_inds[1]].children[path_inds[2]\n ]._replace(children=new_group)\n return groups\n\n @staticmethod\n def _group_parent(groups, id, parent_id=None, parent_idx=None,\n child_idx=None):\n \"\"\" Returns the id of the parent group for a (child) group with specified id.\"\"\"\n for group in groups:\n if id in [x.id for x in group.children]:\n parent_id = group.id\n parent_idx = groups.index(group)\n child_idx = [x.id for x in group.children].index(id)\n else:\n parent_id, parent_idx, child_idx = Skeleton._group_parent(group\n .children, id, parent_id, parent_idx, child_idx)\n return parent_id, parent_idx, child_idx\n\n @staticmethod\n def _group_modify_id(group, id_modifier):\n \"\"\" Modifies group ids with the passed id_modifier (e.g. 
lambda) function.\"\"\"\n group = group._replace(id=id_modifier(group.id))\n group = group._replace(children=list(map(lambda g: Skeleton.\n _group_modify_id(g, id_modifier), group.children)))\n return group\n\n @staticmethod\n def _group_get_ids(groups, ids=[]):\n for group in groups:\n ids.append(group.id)\n Skeleton._group_get_ids(group.children, ids)\n return groups, ids\n\n @staticmethod\n def _get_graph(nodes: Nodes, edges: np.ndarray):\n \"\"\" Returns the networkx graph representation of provided nodes and edges.\"\"\"\n graph = nx.Graph()\n graph.add_nodes_from(nodes['id'])\n attrs = nodes.set_index('id').to_dict('index')\n nx.set_node_attributes(graph, attrs)\n graph.add_edges_from(edges)\n return graph\n\n @staticmethod\n def _num_conn_comp(graph):\n \"\"\" Returns number of connected components for graph\"\"\"\n return nx.number_connected_components(graph)\n",
"step-5": "import os\nimport numpy as np\nimport networkx as nx\nfrom matplotlib import colors, cm\nfrom matplotlib import pyplot as plt\nfrom matplotlib.collections import LineCollection\nfrom mpl_toolkits.mplot3d import Axes3D, art3d\nfrom typing import Union, Sequence, List, Tuple, Optional\n\nimport wknml\n\nfrom wkskel.types import Nodes, Parameters\n\n\nclass Skeleton:\n \"\"\"The Skeleton class facilitates scientific analysis and manipulation of webKnossos tracings.\n\n It is designed as a high-level interface for working with nml files generated e.g with webKnossos. It makes use of\n the (low-level) `wknml` package mostly as an I/O interface to nml files.\n\n Class Attributes:\n DEFAULTS (dict): Global default parameters which are passed to each skeleton object instance\n\n \"\"\"\n\n DEFAULTS = {\n 'node': {\n 'radius': 100,\n 'comment': ''\n },\n 'tree': {\n 'color': (0.0, 0.0, 0.0, 1.0)\n }\n }\n\n def __init__(self, nml_path: str = None, parameters: Parameters = None, strict = True):\n \"\"\" The Skeleton constructor expects either a path to a nml file or a Parameters object as input arguments\n\n Args:\n nml_path: Path to nml file. If constructed via an nml file, the skeleton object is populated with all the\n trees and additional properties specified in the .nml file\n parameters (optional): Parameters (wkskel.types.Parameters) specifying the most rudimentary properties\n of the skeleton.\n strict (optional): Controls assertions ensuring that resulting skeleton objects are compatible with\n webKnossos. Default: True\n\n Examples:\n Using nml_path:\n nml_path = '/path/to/example.nml'\n skel = Skeleton(nml_path)\n\n Using parameters:\n parameters = Skeleton.define_parameters(name=\"2017-01-12_FD0156-2\", scale=(11.24, 11.24, 32))\n skel = Skeleton(parameters=parameters)\n \"\"\"\n\n assert (nml_path is not None) ^ (parameters is not None), \\\n 'To construct a skeleton object, either a path to a nml file or the skeleton parameters need to passed'\n\n self.nodes = list()\n self.edges = list()\n self.names = list()\n self.colors = list()\n self.tree_ids = list()\n self.group_ids = list()\n self.groups = list()\n self.branchpoints = list()\n self.parameters = Parameters()\n self.nml_path = str()\n\n self.strict = strict\n self.defaults = self.DEFAULTS\n\n # Construct from nml file\n if nml_path is not None:\n assert os.path.exists(nml_path), \\\n 'not a valid path: {}'.format(nml_path)\n try:\n with open(nml_path, \"rb\") as f:\n nml = wknml.parse_nml(f)\n except IOError:\n print('not a valid nml file: {}'.format(nml_path))\n\n self._nml_to_skeleton(nml)\n\n # Construct from parameters\n else:\n assert type(parameters) is Parameters, \\\n 'provided parameters must be of type wkskel.types.Parameters'\n\n self._parameters_to_skeleton(parameters)\n\n def add_tree(self,\n nodes: Nodes = Nodes(),\n edges: Union[List[Tuple[int, int]], np.ndarray] = None,\n tree_id: int = None,\n name: str = '',\n group_id: int = None,\n color: Tuple[float, float, float, float] = None):\n \"\"\" Appends new tree to skeleton.\n\n Args:\n nodes (optional): Nodes representing tree to be added\n edges (optional): Edges representing tree to be added\n tree_id (optional): Tree id to be used for new tree. Default: Highest current tree id + 1\n name (optional): Name to be used for new tree. Default: Empty str\n group_id (optional): Group id to be used for new tree. If passed group id does not exist, it is created.\n Default: None\n color (optional): Color to be used for new tree specified as (r, g, b, alpha). 
Default: (0, 0, 0, 1)\n \"\"\"\n\n if edges is None:\n edges = np.empty((0, 2), dtype=np.uint32)\n elif type(edges) is list:\n edges = np.asarray(edges)\n\n if self.strict & (len(nodes) > 1):\n assert Skeleton._num_conn_comp(Skeleton._get_graph(nodes, edges)) == 1, \\\n 'Added tree consists of more than one connected component'\n\n if tree_id is None:\n tree_id = self.max_tree_id() + 1\n\n if (group_id is not None) & (group_id not in self.groups_ids()):\n self.add_group(id=group_id)\n\n if color is None:\n color = self.defaults['tree']['color']\n\n self.nodes.append(nodes)\n self.edges.append(edges)\n self.tree_ids.append(tree_id)\n self.group_ids.append(group_id)\n self.names.append(name)\n self.colors.append(color)\n\n def add_tree_from_skel(self,\n skel: 'Skeleton',\n tree_idx: int,\n group_id: int = None,\n name: str = None):\n \"\"\" Appends a specific tree contained in a different skeleton object to the skeleton.\n\n Args:\n skel: Source skeleton object (different from the one calling this method) to be added\n tree_idx: Source tree index of tree to be added\n group_id (optional): Target group id to which the added tree should be assigned. Default: None\n name (optional): Target name for the added tree\n \"\"\"\n\n if group_id not in self.groups_ids():\n self.add_group(id=group_id)\n\n if name is None:\n name = skel.names[tree_idx]\n\n skel._reset_node_ids(self.max_node_id() + 1)\n skel._reset_tree_ids(self.max_tree_id() + 1)\n\n self.nodes = self.nodes + [skel.nodes[tree_idx]]\n self.edges = self.edges + [skel.edges[tree_idx]]\n self.tree_ids = self.tree_ids + [skel.tree_ids[tree_idx]]\n self.group_ids = self.group_ids + [group_id]\n self.names = self.names + [name]\n self.colors = self.colors + [skel.colors[tree_idx]]\n\n return self\n\n def add_trees_from_skel(self, skel: 'Skeleton'):\n \"\"\" Appends all trees contained in a different skeleton object to the skeleton.\n\n This method attempts to preserve the relative group structure found in the skeleton object to be added\n\n Args:\n skel: Source skeleton object (different from the one calling this method) to be added\n \"\"\"\n\n skel._reset_node_ids(self.max_node_id() + 1)\n skel._reset_tree_ids(self.max_tree_id() + 1)\n\n max_group_id = self.max_group_id()\n if max_group_id is not None:\n skel._reset_group_ids(max_group_id + 1)\n\n self.nodes = self.nodes + skel.nodes\n self.edges = self.edges + skel.edges\n self.tree_ids = self.tree_ids + skel.tree_ids\n self.group_ids = self.group_ids + skel.group_ids\n self.groups = self.groups + skel.groups\n self.names = self.names + skel.names\n self.colors = self.colors + skel.colors\n\n return self\n\n def add_nodes_as_trees(self,\n nodes: Nodes,\n tree_ids: List[int] = None,\n group_ids: List[int] = None,\n names: List[str] = None,\n colors: List[Tuple[float, float, float, float]] = None):\n \"\"\" Appends each of the specified nodes as separate trees to the skeleton (1 node each).\n\n Args:\n nodes: Nodes representing the trees to be added\n tree_ids (optional): Tree ids to be assigned to the newly added trees. Default: Global max + [1, n]\n group_ids (optional): Group ids to be assigned to the newly added trees. Default: None\n names (optional): Names to be assigned to the newly added trees.\n colors (optional): Colors to be used for the new trees specified as (r, g, b, alpha). 
Default: (0, 0, 0, 1)\n \"\"\"\n\n if tree_ids is None:\n tree_id_start = self.max_tree_id() + 1\n tree_id_end = tree_id_start + len(nodes)\n tree_ids = list(range(tree_id_start, tree_id_end))\n\n if group_ids is None:\n group_ids = [None for x in range(len(nodes))]\n\n if names is None:\n names = ['' for x in range(len(nodes))]\n\n if colors is None:\n colors = [(0.0, 0.0, 0.0, 1.0) for x in range(len(nodes))]\n\n for node_idx, _ in nodes.iterrows():\n self.add_tree(\n nodes=nodes[node_idx:node_idx+1],\n tree_id=tree_ids[node_idx],\n group_id=group_ids[node_idx],\n name=names[node_idx],\n color=colors[node_idx]\n )\n\n def delete_tree(self, idx: int = None, id: int = None):\n \"\"\" Deletes tree with specified idx or id.\n\n Args:\n idx: Linear index of tree to be deleted\n id: Id of tree to be deleted\n\n \"\"\"\n\n if id is not None:\n idx = self.tree_ids.index(id)\n\n self.nodes.pop(idx)\n self.edges.pop(idx)\n self.names.pop(idx)\n self.colors.pop(idx)\n self.tree_ids.pop(idx)\n self.group_ids.pop(idx)\n\n def add_group(self, parent_id: int = None, id: int = None, name: str = None):\n \"\"\" Adds a new group to skeleton object.\n\n Args:\n parent_id: Parent group id to which new group is added as a child. Default: None (root group)\n id: Id of new group to be added. Default: Current max group id + 1\n name: Name of new group to be added. Default: 'Group {}'.format(id)\n\n Returns:\n id: Id of added group\n name: Name of added group\n\n \"\"\"\n if parent_id is not None:\n assert (parent_id in self.group_ids), ('Parent id does not exist')\n\n if id is None:\n id = int(np.nanmax(np.asarray(self.group_ids, dtype=np.float)) + 1)\n else:\n assert (id not in self.groups_ids()), ('Id already exists')\n\n if name is None:\n name = 'Group {}'.format(id)\n\n new_group = wknml.Group(id, name, [])\n if parent_id is None:\n self.groups.append(new_group)\n else:\n self.groups = Skeleton._group_append(self.groups, parent_id, new_group)\n\n return id, name\n\n def delete_group(self, id, target_id):\n # TODO\n pass\n\n def define_nodes(self,\n position_x: List[int],\n position_y: List[int],\n position_z: List[int],\n id: List[int] = None,\n radius: Optional[List[int]] = None,\n rotation_x: Optional[List[float]] = None,\n rotation_y: Optional[List[float]] = None,\n rotation_z: Optional[List[float]] = None,\n inVP: Optional[List[int]] = None,\n inMag: Optional[List[int]] = None,\n bitDepth: Optional[List[int]] = None,\n interpolation: Optional[List[bool]] = None,\n time: Optional[List[int]] = None,\n comment: Optional[List[int]] = None) -> Nodes:\n \"\"\" Generates new nodes table from data.\n\n Args:\n position_x: Node position x\n position_y: Node position y\n position_z: Node position z\n id (optional): (Globally unique) Node id. 
Default: New unique ids are generated\n radius (optional): Node radius\n rotation_x (optional): Node rotation x\n rotation_y (optional): Node rotation y\n rotation_z (optional): Node rotation z\n inVP (optional): Viewport index in which node was placed\n inMag (optional): (De-)Magnification factor in which node was placed\n bitDepth (optional): Bit (Color) Depth in which node was placed\n interpolation (optional): Interpolation state in which node was placed\n time (optional): Time stamp at which node was placed\n comment (optional): Comment associated with node\n\n Returns:\n nodes: Nodes object\n\n \"\"\"\n\n if id is None:\n id_max = self.max_node_id()\n id = list(range(id_max+1, id_max+len(position_x)+1))\n\n nodes = Nodes.from_list(id, position_x, position_y, position_z, radius, rotation_x, rotation_y,\n rotation_z, inVP, inMag, bitDepth, interpolation, time, comment)\n\n return nodes\n\n def define_nodes_from_positions(self, positions: np.ndarray) -> Nodes:\n \"\"\" Generates new nodes table from positions only (node ids are generated automatically).\n\n Args:\n positions (N x 3): Numpy array holding the (x,y,z) positions to be returned as nodes in a Nodes table\n\n Returns:\n nodes: Nodes object\n\n \"\"\"\n\n id_max = self.max_node_id()\n id = np.array(range(id_max + 1, id_max + positions.shape[0] + 1)).reshape(-1, 1)\n\n nodes = Nodes.from_numpy(np.append(id, positions, axis=1))\n\n return nodes\n\n def get_distances_to_node(self,\n positions: Union[Sequence[Tuple[int, int, int]], np.ndarray],\n node_id: int = None,\n tree_idx: int = None,\n node_idx: int = None,\n unit: str = 'um') -> List[np.ndarray]:\n \"\"\" Get the (euclidean) distances from the specified node to the provided (x,y,z) positions\n\n Args:\n positions (N x 3): Target (x,y,z) positions to which the distances should be computed\n node_id: Node id of the node for which the distances should be computed\n tree_idx: Tree idx of the node for which the distances should be computed\n node_idx: Node idx of the node for which the distances should be computed\n unit (optional): Unit flag specifying in which unit the distances should be returned.\n Options: 'vx' (voxels), 'nm' (nanometer), 'um' (micrometer). Default: 'um' (micrometer)\n\n Returns:\n distances: Array holding distances\n\n \"\"\"\n\n assert (node_id is not None) ^ ((tree_idx is not None) & (node_idx is not None)), \\\n 'Either provide node_id or both tree_idx and node_idx'\n\n if type(positions) is not np.ndarray:\n positions = np.array(positions)\n\n if node_id is not None:\n node_idx, tree_idx = self.node_id_to_idx(node_id)\n\n unit_factor = self._get_unit_factor(unit)\n distances = Skeleton.get_distance(positions, np.array(self.nodes[tree_idx].position.values[node_idx]), unit_factor)\n\n return distances\n\n def get_distance_to_nodes(self,\n position: Union[Tuple[int, int, int], np.ndarray],\n tree_idx: int,\n unit: str = 'um') -> List[np.ndarray]:\n \"\"\" Get the (euclidean) distances from the nodes of the specified tree to the provided (x,y,z) position\n\n Args:\n position (1 x 3): Target (x,y,z) position to which the node distances should be computed\n tree_idx: Tree idx for which node distances should be computed\n unit (optional): Unit flag specifying in which unit the distances should be returned.\n Options: 'vx' (voxels), 'nm' (nanometer), 'um' (micrometer). 
Default: 'um' (micrometer)\n\n Returns:\n distances: Array holding distances\n\n \"\"\"\n\n if type(position) is not np.ndarray:\n position = np.array(position)\n\n unit_factor = self._get_unit_factor(unit)\n distances = Skeleton.get_distance(np.array(self.nodes[tree_idx].position.values), position, unit_factor)\n\n return distances\n\n def get_graph(self, tree_idx):\n \"\"\" Returns the networkx graph representation of a tree.\n\n Args:\n tree_idx: Linear index of the tree to be returned as graph object\n\n Returns:\n graph: Graph object\n\n \"\"\"\n\n nodes = self.nodes[tree_idx]\n edges = self.edges[tree_idx]\n graph = Skeleton._get_graph(nodes, edges)\n\n return graph\n\n def get_shortest_path(self, node_id_start: int, node_id_end: int) -> List[int]:\n \"\"\" Returns the shortest path between two nodes of a tree.\n\n Args:\n node_id_start: Node id of start node\n node_id_end: Node id of end node\n\n Returns:\n shortest_path: Node indices comprising the shortest path\n\n \"\"\"\n\n _, tree_idx_start = self.node_id_to_idx(node_id_start)\n _, tree_idx_end = self.node_id_to_idx(node_id_end)\n\n assert tree_idx_start == tree_idx_end, 'Provided node ids need to be part of the same tree'\n\n graph = self.get_graph(tree_idx_start)\n shortest_path = nx.shortest_path(graph, node_id_start, node_id_end)\n\n return shortest_path\n\n def plot(self,\n tree_inds: Union[int, List[int]] = None,\n view: str = None,\n colors: Union[Tuple[float, float, float, float], List[Tuple[float, float, float, float]], str] = None,\n unit: str = 'um',\n show: bool = True,\n ax: plt.axes = None):\n \"\"\" Generates a (3D) line plot of the trees contained in the skeleton object.\n\n Args:\n tree_inds (optional): Tree indices to be plotted.\n Default: All trees are plotted\n view (optional): Plot as 2D projection on orthonormal plane.\n Options: 'xy', 'xz', 'yz'\n Default: Plot as 3D projection\n colors (optional): Colors in which trees should be plotted. If only one RGBA tuple is specified, it is\n broadcasted over all trees. Alternatively, a list providing RGBA tuples for each tree can be passed.\n Lastly, the name of a mnatplotlib colormap (https://matplotlib.org/tutorials/colors/colormaps.html) can\n be passed as a str.\n Default: Skeleton colors (self.colors) are used\n unit (optional): Specifies in which unit the plot should be generated.\n Options: 'vx' (voxels), 'nm' (nanometer), 'um' (micrometer).\n Default: 'um' (micrometer)\n show (optional): Displays the plot in an interactive window. For repeatedly plotting on the same axes, set\n to False. 
Default: True\n ax: Axes to be plotted on.\n\n Returns:\n ax: Axes which was plotted on\n \"\"\"\n\n if tree_inds is None:\n tree_inds = list(range(len(self.nodes)))\n elif tree_inds is int:\n tree_inds = [tree_inds]\n\n if colors is None:\n colors = self.colors\n elif type(colors) is str:\n cmap = cm.get_cmap(colors)\n colors = [cmap(x) for x in np.linspace(0, 1, self.num_trees())]\n elif type(colors[0]) is not Sequence:\n colors = [colors] * self.num_trees()\n\n\n unit_factor = self._get_unit_factor(unit)\n\n allowed_views = ['xy', 'xz', 'yz']\n if view is not None:\n assert (view in allowed_views), \\\n 'The passed view argument: {} is not among the allowed views: {}'.format(view, allowed_views)\n\n if ax is None:\n fig = plt.figure()\n if view is None:\n ax = fig.add_subplot(111, projection='3d')\n else:\n ax = fig.add_subplot(111, projection='rectilinear')\n else:\n if view is None:\n assert (ax.name == '3d'), \\\n 'To generate a 3D skeleton plot, the projection type of the passed axes must be 3D'\n else:\n assert (ax.name != '3d'), \\\n 'To generate a 2D skeleton plot, the projection type of the passed axes must be rectilinear'\n\n lims_min = []\n lims_max = []\n\n for tree_idx in tree_inds:\n edges = self.edges[tree_idx].copy()\n nodes = self.nodes[tree_idx].copy()\n\n if len(nodes) > 0:\n nodes['position'] = nodes['position'].multiply(unit_factor)\n if view == 'xy':\n nodes = nodes.drop([('position', 'z')], axis=1)\n elif view == 'xz':\n nodes = nodes.drop([('position', 'y')], axis=1)\n elif view == 'yz':\n nodes = nodes.drop([('position', 'x')], axis=1)\n lims_min.append(np.min(nodes['position'].values, axis=0))\n lims_max.append(np.max(nodes['position'].values, axis=0))\n\n segments = []\n for edge in edges:\n n0 = nodes['position'][nodes.id == edge[0]].values[0]\n n1 = nodes['position'][nodes.id == edge[1]].values[0]\n segment = [[c for c in n0], [c for c in n1]]\n segments.append(segment)\n\n if view is None:\n line_collection = art3d.Line3DCollection(segments=segments, colors=colors[tree_idx])\n ax.add_collection3d(line_collection)\n else:\n line_collection = LineCollection(segments=segments, colors=colors[tree_idx])\n ax.add_collection(line_collection)\n\n lim_min = np.min(np.array(lims_min), axis=0)\n lim_max = np.max(np.array(lims_max), axis=0)\n\n ax.set_xlim(lim_min[0], lim_max[0])\n ax.set_ylim(lim_min[1], lim_max[1])\n if view is None:\n ax.set_zlim(lim_min[2], lim_max[2])\n else:\n ax.set_aspect('equal')\n\n if show:\n plt.show()\n\n return ax\n\n def write_nml(self, nml_write_path):\n \"\"\" Writes the present state of the skeleton object to a .nml file.\n\n Args:\n nml_write_path: Path to which .nml file should be written\n\n \"\"\"\n\n # If the object does not have any trees, construct an empty tree before writing to enable webKnossos import\n if self.num_trees() == 0:\n self.add_tree()\n\n nml = self._skeleton_to_nml()\n with open(nml_write_path, \"wb\") as f:\n wknml.write_nml(f, nml)\n\n # Convenience Methods\n def node_id_to_idx(self, node_id: int) -> (int, int):\n \"\"\" Returns the linear tree and node indices for the provided node id.\"\"\"\n\n node_idx = None\n for tree_idx, nodes in enumerate(self.nodes):\n index_list = nodes[nodes['id'] == node_id].index.tolist()\n if index_list:\n node_idx = index_list[0]\n break\n\n assert (node_idx is not None), \\\n 'node id {} does not exist'.format(node_id)\n\n return node_idx, tree_idx\n\n def node_idx_to_id(self, node_idx: int, tree_idx: int) -> int:\n \"\"\" Returns the node id for the provided tree and node 
idx.\"\"\"\n\n node_id = self.nodes[tree_idx].loc[node_idx, 'id'].values[0]\n\n return node_id\n\n def min_group_id(self) -> int:\n \"\"\" Returns lowest group id. If no groups are defined, return None\"\"\"\n\n group_ids = np.asarray(self.group_ids, dtype=np.float)\n if np.all(np.isnan(group_ids)):\n group_id = None\n else:\n group_id = int(np.nanmin(group_ids))\n\n return group_id\n\n def max_group_id(self) -> int:\n \"\"\" Returns highest group id. If no groups are defined, return None\"\"\"\n\n group_ids = np.asarray(self.group_ids, dtype=np.float)\n if np.all(np.isnan(group_ids)):\n group_id = None\n else:\n group_id = int(np.nanmax(group_ids))\n\n return group_id\n\n def min_node_id(self) -> int:\n \"\"\" Returns lowest global node id.\"\"\"\n\n if len(self.nodes) > 0:\n min_node_id = min([min(nodes.id) if len(nodes) > 0 else 0 for nodes in self.nodes])\n else:\n min_node_id = 0\n\n return min_node_id\n\n def max_node_id(self) -> int:\n \"\"\" Returns highest global node id.\"\"\"\n\n if len(self.nodes) > 0:\n max_node_id = max([max(nodes.id) if len(nodes) > 0 else 0 for nodes in self.nodes])\n else:\n max_node_id = 0\n\n return max_node_id\n\n def min_tree_id(self) -> int:\n \"\"\" Returns lowest global tree id.\"\"\"\n\n return min(self.tree_ids) if len(self.tree_ids)>0 else 0\n\n def max_tree_id(self) -> int:\n \"\"\" Returns highest global tree id.\"\"\"\n\n return max(self.tree_ids) if len(self.tree_ids)>0 else 0\n\n def num_trees(self) -> int:\n \"\"\"Returns number of trees contained in skeleton object.\"\"\"\n\n return len(self.nodes)\n\n def groups_ids(self) -> List[int]:\n \"\"\" Returns all ids defined in groups tree\"\"\"\n\n _, groups_ids = Skeleton._group_get_ids(self.groups)\n\n return groups_ids\n\n # Private Methods\n def _get_unit_factor(self, unit: str) -> np.ndarray:\n \"\"\" Returns factor for unit conversion\n\n Args:\n unit: Unit for which to return the conversion factor.\n Options: 'vx' (voxels), 'nm' (nanometer), 'um' (micrometer)\n\n Returns:\n unit_factor (shape=(3,)): Unit conversion factors\n \"\"\"\n\n unit_factors = {\n 'vx': np.array((1, 1, 1)),\n 'nm': np.array(self.parameters.scale),\n 'um': np.array(self.parameters.scale)/1000\n }\n assert unit in unit_factors.keys(), 'Invalid unit'\n unit_factor = unit_factors[unit]\n\n return unit_factor\n\n def _reset_node_ids(self, start_id: int):\n \"\"\" Resets node ids of skeleton to begin with start value.\n\n Args:\n start_id: Start value to which the lowest node id should be set.\n \"\"\"\n\n add_id = start_id - self.min_node_id()\n for tree_idx, _ in enumerate(self.nodes):\n self.nodes[tree_idx].nodes['id'] += add_id\n self.edges[tree_idx] += add_id\n\n def _reset_tree_ids(self, start_id: int):\n \"\"\" Resets tree ids of skeleton to begin with start value.\n\n Args:\n start_id: Start value to which the lowest tree id should be set.\n \"\"\"\n\n add_id = start_id - self.min_tree_id()\n self.tree_ids = [tree_id + add_id for tree_id in self.tree_ids]\n\n def _reset_group_ids(self, start_id: int):\n \"\"\" Resets group ids of skeleton to begin with start value.\n\n Args:\n start_id: Start value to which the lowest group id should be set.\n \"\"\"\n\n min_group_id = self.min_group_id()\n if min_group_id is not None:\n add_id = start_id - min_group_id\n self.group_ids = [i + add_id if i is not None else i for i in self.group_ids]\n self.groups = [Skeleton._group_modify_id(group, id_modifier=lambda x: x + add_id) for group in self.groups]\n\n def _parameters_to_skeleton(self, parameters):\n \"\"\" Generates 
bare skeleton object from parameters.\"\"\"\n\n self.parameters = parameters\n\n def _nml_to_skeleton(self, nml):\n \"\"\" Converts wknml to skeleton data structures.\"\"\"\n\n self.groups = nml.groups\n self.branchpoints = nml.branchpoints\n self.parameters = Parameters(**nml.parameters._asdict())\n\n for tree in nml.trees:\n self.add_tree(\n nodes=Skeleton._nml_nodes_to_nodes(nml_nodes=tree.nodes, nml_comments=nml.comments),\n edges=np.array([(edge.source, edge.target) for edge in tree.edges]),\n group_id=tree.groupId,\n name=tree.name,\n color=tree.color\n )\n\n def _skeleton_to_nml(self):\n \"\"\" Converts skeleton to wknml data structures.\"\"\"\n\n trees = []\n for tree_idx, tree_id in enumerate(self.tree_ids):\n nml_nodes = Skeleton._nodes_to_nml_nodes(self.nodes[tree_idx])\n nml_edges = Skeleton._edges_to_nml_edges(self.edges[tree_idx])\n tree = wknml.Tree(\n id=tree_id,\n color=self.colors[tree_idx],\n name=self.names[tree_idx],\n groupId=self.group_ids[tree_idx],\n nodes=nml_nodes,\n edges=nml_edges\n )\n trees.append(tree)\n\n nml = wknml.NML(\n parameters=wknml.NMLParameters(**self.parameters._asdict()),\n trees=trees,\n branchpoints=self.branchpoints,\n comments=self._skeleton_to_nml_comments(),\n groups=self.groups\n )\n\n return nml\n\n def _skeleton_to_nml_comments(self):\n \"\"\" Converts skeleton to wknml comments.\"\"\"\n\n nml_comments = []\n for nodes in self.nodes:\n comment_nodes = nodes[nodes['comment'].notnull()]\n for _, row in comment_nodes.iterrows():\n nml_comment = wknml.Comment(\n node=row['id'].values[0],\n content=row['comment'].values[0]\n )\n nml_comments.append(nml_comment)\n\n return nml_comments\n\n # Static Methods\n @staticmethod\n def define_parameters(\n name: str,\n scale: Tuple[float, float, float],\n offset: Tuple[float, float, float] = (0, 0, 0),\n time: int = 0,\n editPosition: Tuple[float, float, float] = (1.0, 1.0, 1.0),\n editRotation: Tuple[float, float, float] = (0.0, 0.0, 0.0),\n zoomLevel: float = 1.0,\n taskBoundingBox: Tuple[int, int, int, int, int, int] = None,\n userBoundingBox: Tuple[int, int, int, int, int, int] = None) -> Parameters:\n\n parameters = Parameters(\n name=name,\n scale=scale,\n offset=offset,\n time=time,\n editPosition=editPosition,\n editRotation=editRotation,\n zoomLevel=zoomLevel,\n taskBoundingBox=taskBoundingBox,\n userBoundingBox=userBoundingBox\n )\n\n return parameters\n\n # Static Methods\n @staticmethod\n def get_distance(positions: np.ndarray, position: np.ndarray, unit_factor: np.ndarray = None):\n \"\"\" Get the (euclidean) distances between positions and a target position\n\n Args:\n positions (N x 3): Array holding (multiple) x, y, z positions\n position (1 x 3): Array holding x, y, z position to which the distances should be computed\n unit_factors (1 x 3 Array, optional): Conversion factors with which distances are multiplied. 
Default (1,1,1)\n\n Returns:\n distances: Arrays holding distances\n\n \"\"\"\n\n if unit_factor is None:\n unit_factor = np.array([1, 1, 1])\n\n distances = np.sqrt(np.sum(((positions - position) * unit_factor.reshape(1, 3)) ** 2, axis=1))\n\n return distances\n\n # Static Private Methods\n @staticmethod\n def _nml_nodes_to_nodes(nml_nodes, nml_comments):\n \"\"\" Converts wknml nodes (list of named tuples) to skeleton nodes (DataFrame subclass).\"\"\"\n\n data = [(node.id, node.position[0], node.position[1], node.position[2], node.radius, node.rotation[0],\n node.rotation[1], node.rotation[2], node.inVp, node.inMag, node.bitDepth, node.interpolation,\n node.time, np.nan) for node in nml_nodes]\n\n nodes = Nodes(data=data)\n\n # Add comments to nodes table\n comment_node_ids = [comment.node for comment in nml_comments]\n comment_strings = [comment.content for comment in nml_comments]\n nodes_ids_comments = nodes.id[nodes.id.isin(comment_node_ids)]\n for id in nodes_ids_comments:\n id_comment = comment_strings[comment_node_ids.index(id)]\n nodes.loc[nodes.id == id, ('comment', '')] = id_comment\n\n return nodes\n\n @staticmethod\n def _nodes_to_nml_nodes(nodes):\n \"\"\" Converts skeleton nodes (DataFrame subclass) to wknml nodes (list of named tuples).\"\"\"\n\n nml_nodes = []\n for idx, row in nodes.iterrows():\n nml_node = wknml.Node(\n id=int(row.id),\n position=tuple(row.position.values),\n radius=float(row.radius),\n rotation=tuple(row.rotation.values),\n inVp=int(row.inVp),\n inMag=int(row.inMag),\n bitDepth=int(row.bitDepth),\n interpolation=bool(row.interpolation.values),\n time=int(row.time)\n )\n nml_nodes.append(nml_node)\n\n return nml_nodes\n\n @staticmethod\n def _edges_to_nml_edges(edges):\n \"\"\" Converts skeleton edges (numpy array) to wknml edges (list of named tuples).\"\"\"\n\n nml_edges = []\n for idx in range(edges.shape[0]):\n nml_edge = wknml.Edge(\n source=int(edges[idx, 0]),\n target=int(edges[idx, 1]),\n )\n nml_edges.append(nml_edge)\n\n return nml_edges\n\n @staticmethod\n def _group_append(groups, id, new_group):\n \"\"\" Appends new group as a child of existing group with specified id. Currently only works up to depth=3.\"\"\"\n\n path_inds = []\n _, _, idx = Skeleton._group_parent(groups, id)\n while id is not None:\n path_inds.append(idx)\n id, idx, _ = Skeleton._group_parent(groups, id)\n\n path_inds = list(reversed(path_inds))\n\n if len(path_inds) == 1:\n groups[path_inds[0]]._replace(children=new_group)\n elif len(path_inds) == 2:\n groups[path_inds[0]].children[path_inds[1]]._replace(children=new_group)\n elif len(path_inds) == 3:\n groups[path_inds[0]].children[path_inds[1]].children[path_inds[2]]._replace(children=new_group)\n\n return groups\n\n @staticmethod\n def _group_parent(groups, id, parent_id=None, parent_idx=None, child_idx=None):\n \"\"\" Returns the id of the parent group for a (child) group with specified id.\"\"\"\n\n for group in groups:\n if id in [x.id for x in group.children]:\n parent_id = group.id\n parent_idx = groups.index(group)\n child_idx = [x.id for x in group.children].index(id)\n else:\n parent_id, parent_idx, child_idx = Skeleton._group_parent(group.children, id, parent_id, parent_idx, child_idx)\n\n return parent_id, parent_idx, child_idx\n\n @staticmethod\n def _group_modify_id(group, id_modifier):\n \"\"\" Modifies group ids with the passed id_modifier (e.g. 
lambda) function.\"\"\"\n\n group = group._replace(id=id_modifier(group.id))\n group = group._replace(children=list(map(lambda g: Skeleton._group_modify_id(g, id_modifier), group.children)))\n\n return group\n\n @staticmethod\n def _group_get_ids(groups, ids = []):\n\n for group in groups:\n ids.append(group.id)\n Skeleton._group_get_ids(group.children, ids)\n\n return groups, ids\n\n @staticmethod\n def _get_graph(nodes: Nodes, edges: np.ndarray):\n \"\"\" Returns the networkx graph representation of provided nodes and edges.\"\"\"\n\n graph = nx.Graph()\n graph.add_nodes_from(nodes['id'])\n attrs = nodes.set_index('id').to_dict('index')\n nx.set_node_attributes(graph, attrs)\n graph.add_edges_from(edges)\n\n return graph\n\n @staticmethod\n def _num_conn_comp(graph):\n \"\"\" Returns number of connected components for graph\"\"\"\n\n return nx.number_connected_components(graph)\n\n\n",
"step-ids": [
25,
43,
44,
46,
50
]
}
|
[
25,
43,
44,
46,
50
] |
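A minimal usage sketch for the Skeleton class above, lifted from its constructor docstring (the dataset name, scale and output path are illustrative only):

parameters = Skeleton.define_parameters(name="2017-01-12_FD0156-2", scale=(11.24, 11.24, 32))
skel = Skeleton(parameters=parameters)
skel.write_nml('example.nml')  # write_nml adds an empty tree first so webKnossos can import the file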
<|reserved_special_token_0|>
@login_manager.user_loader
def load_user(userid):
try:
        return models.User.get(models.User.id == userid)
except models.DoesNotExist:
return None
def initialize():
models.DATABASE.connect()
models.DATABASE.create_tables([models.User], safe=True)
    models.DATABASE.close()
@app.before_request
def before_request():
    """Connect to the database before each request."""
g.db = models.DATABASE
g.db.connect()
g.user = current_user
<|reserved_special_token_0|>
@app.route('/register', methods=('GET', 'POST'))
def register():
form = forms.RegistrationForm()
if form.validate_on_submit():
        flash('Yay, you registered', 'success')
models.User.create_user(username=form.username.data, email=form.
email.data, password=form.password.data, confrimpassword=form.
password.data)
return redirect(url_for('index'))
return render_template('register.html', form=form)
def check_password_hash(password, data):
pass
<|reserved_special_token_0|>
@app.route('/logout')
@login_required
def logout():
logout_user()
    flash("You've been logged out! Come back soon!", 'success')
return redirect(url_for('index'))
@app.route('/new_post', methods=('GET', 'POST'))
@login_required
def post():
form = forms.PostForm()
if form.validate_on_submit():
models.Post.create(user=g.user._get_current_object(), content=form.
content.data.strip())
        flash('Message Posted! Thanks!', 'success')
return redirect(url_for('index'))
return render_template('post.html', form=form)
@app.route('/')
def index():
return 'Hey!'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@login_manager.user_loader
def load_user(userid):
try:
        return models.User.get(models.User.id == userid)
except models.DoesNotExist:
return None
def initialize():
models.DATABASE.connect()
models.DATABASE.create_tables([models.User], safe=True)
    models.DATABASE.close()
@app.before_request
def before_request():
    """Connect to the database before each request."""
g.db = models.DATABASE
g.db.connect()
g.user = current_user
<|reserved_special_token_0|>
@app.route('/register', methods=('GET', 'POST'))
def register():
form = forms.RegistrationForm()
if form.validate_on_submit():
        flash('Yay, you registered', 'success')
models.User.create_user(username=form.username.data, email=form.
email.data, password=form.password.data, confrimpassword=form.
password.data)
return redirect(url_for('index'))
return render_template('register.html', form=form)
def check_password_hash(password, data):
pass
@app.route('/login', methods=('GET', 'POST'))
def login():
form = forms.LoginForm()
if form.validate_on_submit():
try:
            user = models.User.get(models.User.email == form.email.data)
        except models.DoesNotExist:
flash("Your email or password doesn't match !", 'error')
else:
if check_password_hash(user.password, form.password.data):
login_user(user)
                flash("You've been logged in:", 'Success')
return redirect(url_for('index'))
else:
flash("Your email or password doesn't match!", 'error')
return render_template('login.html', form=form)
@app.route('/logout')
@login_required
def logout():
logout_user()
    flash("You've been logged out! Come back soon!", 'success')
return redirect(url_for('index'))
@app.route('/new_post', methods=('GET', 'POST'))
@login_required
def post():
form = forms.PostForm()
if form.validate_on_submit():
models.Post.create(user=g.user._get_current_object(), content=form.
content.data.strip())
        flash('Message Posted! Thanks!', 'success')
return redirect(url_for('index'))
return render_template('post.html', form=form)
@app.route('/')
def index():
return 'Hey!'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@login_manager.user_loader
def load_user(userid):
try:
        return models.User.get(models.User.id == userid)
except models.DoesNotExist:
return None
def initialize():
models.DATABASE.connect()
models.DATABASE.create_tables([models.User], safe=True)
    models.DATABASE.close()
@app.before_request
def before_request():
    """Connect to the database before each request."""
g.db = models.DATABASE
g.db.connect()
g.user = current_user
@app.after_request
def after_request(response):
    """Close the database connection after each request."""
g.db.close()
return response
@app.route('/register', methods=('GET', 'POST'))
def register():
form = forms.RegistrationForm()
if form.validate_on_submit():
        flash('Yay, you registered', 'success')
models.User.create_user(username=form.username.data, email=form.
email.data, password=form.password.data, confrimpassword=form.
password.data)
return redirect(url_for('index'))
return render_template('register.html', form=form)
def check_password_hash(password, data):
pass
@app.route('/login', methods=('GET', 'POST'))
def login():
form = forms.LoginForm()
if form.validate_on_submit():
try:
            user = models.User.get(models.User.email == form.email.data)
        except models.DoesNotExist:
flash("Your email or password doesn't match !", 'error')
else:
if check_password_hash(user.password, form.password.data):
login_user(user)
                flash("You've been logged in:", 'Success')
return redirect(url_for('index'))
else:
flash("Your email or password doesn't match!", 'error')
return render_template('login.html', form=form)
@app.route('/logout')
@login_required
def logout():
logout_user()
    flash("You've been logged out! Come back soon!", 'success')
return redirect(url_for('index'))
@app.route('/new_post', methods=('GET', 'POST'))
@login_required
def post():
form = forms.PostForm()
if form.validate_on_submit():
models.Post.create(user=g.user._get_current_object(), content=form.
content.data.strip())
        flash('Message Posted! Thanks!', 'success')
return redirect(url_for('index'))
return render_template('post.html', form=form)
@app.route('/')
def index():
return 'Hey!'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from flask import Flask, g, render_template, flash, redirect, url_for
from flask_login import LoginManager, login_user, logout_user, login_required, current_user
import forms
import models
import sqlite3
DEBUG = True
app = Flask(__name__)
app.secret_key = 'auoesh.bouoastuh.43,uoausoehuoshuosth3ououea.auoub!'
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'login'
@login_manager.user_loader
def load_user(userid):
try:
        return models.User.get(models.User.id == userid)
except models.DoesNotExist:
return None
def initialize():
models.DATABASE.connect()
models.DATABASE.create_tables([models.User], safe=True)
    models.DATABASE.close()
@app.before_request
def before_request():
    """Connect to the database before each request."""
g.db = models.DATABASE
g.db.connect()
g.user = current_user
@app.after_request
def after_request(response):
    """Close the database connection after each request."""
g.db.close()
return response
@app.route('/register', methods=('GET', 'POST'))
def register():
form = forms.RegistrationForm()
if form.validate_on_submit():
        flash('Yay, you registered', 'success')
models.User.create_user(username=form.username.data, email=form.
email.data, password=form.password.data, confrimpassword=form.
password.data)
return redirect(url_for('index'))
return render_template('register.html', form=form)
def check_password_hash(password, data):
pass
@app.route('/login', methods=('GET', 'POST'))
def login():
form = forms.LoginForm()
if form.validate_on_submit():
try:
            user = models.User.get(models.User.email == form.email.data)
        except models.DoesNotExist:
flash("Your email or password doesn't match !", 'error')
else:
if check_password_hash(user.password, form.password.data):
login_user(user)
                flash("You've been logged in:", 'Success')
return redirect(url_for('index'))
else:
flash("Your email or password doesn't match!", 'error')
return render_template('login.html', form=form)
@app.route('/logout')
@login_required
def logout():
logout_user()
    flash("You've been logged out! Come back soon!", 'success')
return redirect(url_for('index'))
@app.route('/new_post', methods=('GET', 'POST'))
@login_required
def post():
form = forms.PostForm()
if form.validate_on_submit():
models.Post.create(user=g.user._get_current_object(), content=form.
content.data.strip())
        flash('Message Posted! Thanks!', 'success')
return redirect(url_for('index'))
return render_template('post.html', form=form)
@app.route('/')
def index():
return 'Hey!'
<|reserved_special_token_0|>
if __name__ == '__main__':
app.run(debug=DEBUG)
<|reserved_special_token_1|>
from flask import (Flask, g, render_template, flash, redirect, url_for)
from flask_login import (LoginManager, login_user, logout_user,
login_required, current_user)
import forms
import models
import sqlite3
DEBUG = True
app = Flask(__name__)
app.secret_key = 'auoesh.bouoastuh.43,uoausoehuoshuosth3ououea.auoub!'
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'login'
@login_manager.user_loader
def load_user(userid):
try:
        return models.User.get(models.User.id == userid)
except models.DoesNotExist:
return None
def initialize():
models.DATABASE.connect()
models.DATABASE.create_tables([models.User], safe=True)
    models.DATABASE.close()
@app.before_request
def before_request():
    """Connect to the database before each request."""
g.db = models.DATABASE
g.db.connect()
g.user = current_user
@app.after_request
def after_request(response):
    """Close the database connection after each request."""
g.db.close()
return response
@app.route('/register', methods=('GET', 'POST'))
def register():
form = forms.RegistrationForm()
if form.validate_on_submit():
        flash("Yay, you registered", "success")
models.User.create_user(
username=form.username.data,
email=form.email.data,
password=form.password.data,
confrimpassword=form.password.data
)
return redirect(url_for('index'))
return render_template('register.html', form=form)
def check_password_hash(password, data):
pass
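# A minimal sketch of a real check (assumption: the User model stores bcrypt
# hashes, which nothing in this file confirms). It delegates to flask_bcrypt's
# check_password_hash; the helper name below is illustrative only and is not
# called by the routes, which still use the placeholder above.
def bcrypt_password_matches(pw_hash, candidate):
    from flask_bcrypt import check_password_hash as _bcrypt_check
    return _bcrypt_check(pw_hash, candidate)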
@app.route('/login', methods=('GET', 'POST'))
def login():
form = forms.LoginForm()
if form.validate_on_submit():
try:
            user = models.User.get(models.User.email == form.email.data)
        except models.DoesNotExist:
flash("Your email or password doesn't match !", "error")
else:
if check_password_hash(user.password, form.password.data):
login_user(user)
                flash("You've been logged in:", "Success")
return redirect(url_for('index'))
else:
flash("Your email or password doesn't match!", "error")
return render_template('login.html', form=form)
@app.route('/logout')
@login_required
def logout():
logout_user()
    flash("You've been logged out! Come back soon!", "success")
return redirect(url_for('index'))
@app.route('/new_post', methods=('GET', 'POST'))
@login_required  # makes sure the user is logged in before being able to post
def post():
form = forms.PostForm()
if form.validate_on_submit():
models.Post.create(user=g.user._get_current_object(),
content=form.content.data.strip())
        flash("Message Posted! Thanks!", "success")
return redirect(url_for('index'))
return render_template('post.html', form=form)
@app.route('/')
def index():
return 'Hey!'
"""
models.initialize()
try:
models.User.create_user(
username='Steve',
email='[email protected]',
password='passsword',
admin=True
)
except ValueError:
pass
"""
if __name__ == '__main__':
app.run(debug=DEBUG)
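# Quick smoke test (sketch, not part of the original file): Flask's test client
# can exercise the routes above without starting the server, e.g.
#
#     with app.test_client() as client:
#         assert client.get('/').data == b'Hey!'   # index() returns 'Hey!'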
|
flexible
|
{
"blob_id": "849c468e4890c19806c678089ec8668576538b12",
"index": 2717,
"step-1": "<mask token>\n\n\n@login_manager.user_loader\ndef load_user(userid):\n try:\n return models.user.get(models.User.id == userid)\n except models.DoesNotExist:\n return None\n\n\ndef initialize():\n models.DATABASE.connect()\n models.DATABASE.create_tables([models.User], safe=True)\n models.DATABASE.closer()\n\n\[email protected]_request\ndef before_request():\n \"\"\"\"Connect to the database before each request.\"\"\"\n g.db = models.DATABASE\n g.db.connect()\n g.user = current_user\n\n\n<mask token>\n\n\[email protected]('/register', methods=('GET', 'POST'))\ndef register():\n form = forms.RegistrationForm()\n if form.validate_on_submit():\n flash('Yay, you registered', 'sucess')\n models.User.create_user(username=form.username.data, email=form.\n email.data, password=form.password.data, confrimpassword=form.\n password.data)\n return redirect(url_for('index'))\n return render_template('register.html', form=form)\n\n\ndef check_password_hash(password, data):\n pass\n\n\n<mask token>\n\n\[email protected]('/logout')\n@login_required\ndef logout():\n logout_user()\n flash('You.ve been logged out! Come back soon!', 'sucess')\n return redirect(url_for('index'))\n\n\[email protected]('/new_post', methods=('GET', 'POST'))\n@login_required\ndef post():\n form = forms.PostForm()\n if form.validate_on_submit():\n models.Post.create(user=g.user._get_current_object(), content=form.\n content.data.strip())\n flash('Message Posted! Thanks!', 'sucess')\n return redirect(url_for('index'))\n return render_template('post.html', form=form)\n\n\[email protected]('/')\ndef index():\n return 'Hey!'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@login_manager.user_loader\ndef load_user(userid):\n try:\n return models.user.get(models.User.id == userid)\n except models.DoesNotExist:\n return None\n\n\ndef initialize():\n models.DATABASE.connect()\n models.DATABASE.create_tables([models.User], safe=True)\n models.DATABASE.closer()\n\n\[email protected]_request\ndef before_request():\n \"\"\"\"Connect to the database before each request.\"\"\"\n g.db = models.DATABASE\n g.db.connect()\n g.user = current_user\n\n\n<mask token>\n\n\[email protected]('/register', methods=('GET', 'POST'))\ndef register():\n form = forms.RegistrationForm()\n if form.validate_on_submit():\n flash('Yay, you registered', 'sucess')\n models.User.create_user(username=form.username.data, email=form.\n email.data, password=form.password.data, confrimpassword=form.\n password.data)\n return redirect(url_for('index'))\n return render_template('register.html', form=form)\n\n\ndef check_password_hash(password, data):\n pass\n\n\[email protected]('/login', methods=('GET', 'POST'))\ndef login():\n form = forms.LoginForm()\n if form.validate_on_submit():\n try:\n user = models.User.get(models.User.emails == form.email.data)\n except models.DoesNOtExit:\n flash(\"Your email or password doesn't match !\", 'error')\n else:\n if check_password_hash(user.password, form.password.data):\n login_user(user)\n flash(\"You've been logged in:\", 'Sucess')\n return redirect(url_for('index'))\n else:\n flash(\"Your email or password doesn't match!\", 'error')\n return render_template('login.html', form=form)\n\n\[email protected]('/logout')\n@login_required\ndef logout():\n logout_user()\n flash('You.ve been logged out! Come back soon!', 'sucess')\n return redirect(url_for('index'))\n\n\[email protected]('/new_post', methods=('GET', 'POST'))\n@login_required\ndef post():\n form = forms.PostForm()\n if form.validate_on_submit():\n models.Post.create(user=g.user._get_current_object(), content=form.\n content.data.strip())\n flash('Message Posted! Thanks!', 'sucess')\n return redirect(url_for('index'))\n return render_template('post.html', form=form)\n\n\[email protected]('/')\ndef index():\n return 'Hey!'\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@login_manager.user_loader\ndef load_user(userid):\n try:\n return models.user.get(models.User.id == userid)\n except models.DoesNotExist:\n return None\n\n\ndef initialize():\n models.DATABASE.connect()\n models.DATABASE.create_tables([models.User], safe=True)\n models.DATABASE.closer()\n\n\[email protected]_request\ndef before_request():\n \"\"\"\"Connect to the database before each request.\"\"\"\n g.db = models.DATABASE\n g.db.connect()\n g.user = current_user\n\n\[email protected]_request\ndef after_request(response):\n \"\"\"\"\"Close the database connection after request. \"\"\"\n g.db.close()\n return response\n\n\[email protected]('/register', methods=('GET', 'POST'))\ndef register():\n form = forms.RegistrationForm()\n if form.validate_on_submit():\n flash('Yay, you registered', 'sucess')\n models.User.create_user(username=form.username.data, email=form.\n email.data, password=form.password.data, confrimpassword=form.\n password.data)\n return redirect(url_for('index'))\n return render_template('register.html', form=form)\n\n\ndef check_password_hash(password, data):\n pass\n\n\[email protected]('/login', methods=('GET', 'POST'))\ndef login():\n form = forms.LoginForm()\n if form.validate_on_submit():\n try:\n user = models.User.get(models.User.emails == form.email.data)\n except models.DoesNOtExit:\n flash(\"Your email or password doesn't match !\", 'error')\n else:\n if check_password_hash(user.password, form.password.data):\n login_user(user)\n flash(\"You've been logged in:\", 'Sucess')\n return redirect(url_for('index'))\n else:\n flash(\"Your email or password doesn't match!\", 'error')\n return render_template('login.html', form=form)\n\n\[email protected]('/logout')\n@login_required\ndef logout():\n logout_user()\n flash('You.ve been logged out! Come back soon!', 'sucess')\n return redirect(url_for('index'))\n\n\[email protected]('/new_post', methods=('GET', 'POST'))\n@login_required\ndef post():\n form = forms.PostForm()\n if form.validate_on_submit():\n models.Post.create(user=g.user._get_current_object(), content=form.\n content.data.strip())\n flash('Message Posted! Thanks!', 'sucess')\n return redirect(url_for('index'))\n return render_template('post.html', form=form)\n\n\[email protected]('/')\ndef index():\n return 'Hey!'\n\n\n<mask token>\n",
"step-4": "from flask import Flask, g, render_template, flash, redirect, url_for\nfrom flask_login import LoginManager, login_user, logout_user, login_required, current_user\nimport forms\nimport models\nimport sqlite3\nDEBUG = True\napp = Flask(__name__)\napp.secret_key = 'auoesh.bouoastuh.43,uoausoehuoshuosth3ououea.auoub!'\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\nlogin_manager.login_view = 'login'\n\n\n@login_manager.user_loader\ndef load_user(userid):\n try:\n return models.user.get(models.User.id == userid)\n except models.DoesNotExist:\n return None\n\n\ndef initialize():\n models.DATABASE.connect()\n models.DATABASE.create_tables([models.User], safe=True)\n models.DATABASE.closer()\n\n\[email protected]_request\ndef before_request():\n \"\"\"\"Connect to the database before each request.\"\"\"\n g.db = models.DATABASE\n g.db.connect()\n g.user = current_user\n\n\[email protected]_request\ndef after_request(response):\n \"\"\"\"\"Close the database connection after request. \"\"\"\n g.db.close()\n return response\n\n\[email protected]('/register', methods=('GET', 'POST'))\ndef register():\n form = forms.RegistrationForm()\n if form.validate_on_submit():\n flash('Yay, you registered', 'sucess')\n models.User.create_user(username=form.username.data, email=form.\n email.data, password=form.password.data, confrimpassword=form.\n password.data)\n return redirect(url_for('index'))\n return render_template('register.html', form=form)\n\n\ndef check_password_hash(password, data):\n pass\n\n\[email protected]('/login', methods=('GET', 'POST'))\ndef login():\n form = forms.LoginForm()\n if form.validate_on_submit():\n try:\n user = models.User.get(models.User.emails == form.email.data)\n except models.DoesNOtExit:\n flash(\"Your email or password doesn't match !\", 'error')\n else:\n if check_password_hash(user.password, form.password.data):\n login_user(user)\n flash(\"You've been logged in:\", 'Sucess')\n return redirect(url_for('index'))\n else:\n flash(\"Your email or password doesn't match!\", 'error')\n return render_template('login.html', form=form)\n\n\[email protected]('/logout')\n@login_required\ndef logout():\n logout_user()\n flash('You.ve been logged out! Come back soon!', 'sucess')\n return redirect(url_for('index'))\n\n\[email protected]('/new_post', methods=('GET', 'POST'))\n@login_required\ndef post():\n form = forms.PostForm()\n if form.validate_on_submit():\n models.Post.create(user=g.user._get_current_object(), content=form.\n content.data.strip())\n flash('Message Posted! Thanks!', 'sucess')\n return redirect(url_for('index'))\n return render_template('post.html', form=form)\n\n\[email protected]('/')\ndef index():\n return 'Hey!'\n\n\n<mask token>\nif __name__ == '__main__':\n app.run(debug=DEBUG)\n",
"step-5": "from flask import (Flask, g, render_template, flash, redirect, url_for)\nfrom flask_login import (LoginManager, login_user, logout_user,\n login_required, current_user)\n\nimport forms\nimport models\nimport sqlite3\n\nDEBUG = True\n\napp = Flask(__name__)\napp.secret_key = 'auoesh.bouoastuh.43,uoausoehuoshuosth3ououea.auoub!'\n\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\nlogin_manager.login_view = 'login'\n\n\n@login_manager.user_loader\ndef load_user(userid):\n try:\n return models.user.get(models.User.id == userid)\n except models.DoesNotExist:\n return None\n\n\ndef initialize():\n models.DATABASE.connect()\n models.DATABASE.create_tables([models.User], safe=True)\n models.DATABASE.closer()\n\n\[email protected]_request\ndef before_request():\n \"\"\"\"Connect to the database before each request.\"\"\"\n g.db = models.DATABASE\n g.db.connect()\n g.user = current_user\n\[email protected]_request\ndef after_request(response):\n \"\"\"\"\"Close the database connection after request. \"\"\"\n g.db.close()\n return response\n\[email protected]('/register', methods=('GET', 'POST'))\ndef register():\n form = forms.RegistrationForm()\n if form.validate_on_submit():\n flash(\"Yay, you registered\", \"sucess\")\n models.User.create_user(\n username=form.username.data,\n email=form.email.data,\n password=form.password.data,\n confrimpassword=form.password.data\n )\n return redirect(url_for('index'))\n return render_template('register.html', form=form)\n\n\ndef check_password_hash(password, data):\n pass\n\n\[email protected]('/login', methods=('GET', 'POST'))\ndef login():\n form = forms.LoginForm()\n if form.validate_on_submit():\n try:\n user = models.User.get(models.User.emails == form.email.data)\n except models.DoesNOtExit:\n flash(\"Your email or password doesn't match !\", \"error\")\n else:\n if check_password_hash(user.password, form.password.data):\n login_user(user)\n flash(\"You've been logged in:\", \"Sucess\")\n return redirect(url_for('index'))\n else:\n flash(\"Your email or password doesn't match!\", \"error\")\n return render_template('login.html', form=form)\n\[email protected]('/logout')\n@login_required\ndef logout():\n logout_user()\n flash(\"You.ve been logged out! Come back soon!\", \"sucess\")\n return redirect(url_for('index'))\n\[email protected]('/new_post', methods=('GET', 'POST'))\n@login_required #makes sures the user is logged in before been able to post\ndef post():\n form = forms.PostForm()\n if form.validate_on_submit():\n models.Post.create(user=g.user._get_current_object(),\n content=form.content.data.strip())\n flash(\"Message Posted! Thanks!\", \"sucess\")\n return redirect(url_for('index'))\n return render_template('post.html', form=form)\n\[email protected]('/')\ndef index():\n return 'Hey!'\n\n\"\"\"\nmodels.initialize()\ntry:\n models.User.create_user(\n username='Steve',\n email='[email protected]',\n password='passsword',\n admin=True\n )\n except ValueError:\n pass\n\"\"\" \nif __name__ == '__main__':\n app.run(debug=DEBUG)\n",
"step-ids": [
8,
9,
10,
13,
14
]
}
|
[
8,
9,
10,
13,
14
] |
<|reserved_special_token_0|>
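# calcSuccess below scores the player's predicted counter-move against the
# prompted attack. The randAssault indices follow the order of assaultList in
# runPrompt: 0 = grab from your right, 1 = grab from your left, 2 = blade
# attack from the right, 3 = blade attack from the left, 4 = hit from the
# right, 5 = hit from the left.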
def calcSuccess(predictedCounter, randAssault):
vidLabel.pack_forget()
if predictedCounter == 'parry_R':
instructionLabel.config(text='RIGHT PARRY')
if randAssault == 4 or randAssault == 2:
descriptionLabel.config(text="You've successfully parried!")
elif randAssault == 3:
descriptionLabel.config(text="You've been cut!")
elif randAssault == 5:
descriptionLabel.config(text="You've been hit!")
else:
descriptionLabel.config(text="You've been grabbed!")
if predictedCounter == 'parry_L':
instructionLabel.config(text='LEFT PARRY')
if randAssault == 5 or randAssault == 3:
descriptionLabel.config(text="You've successfully parried!")
elif randAssault == 2:
descriptionLabel.config(text="You've been cut!")
elif randAssault == 4:
descriptionLabel.config(text="You've been hit!")
else:
descriptionLabel.config(text="You've been grabbed!")
if predictedCounter == 'punch_R':
instructionLabel.config(text='RIGHT PUNCH')
if randAssault == 0 or randAssault == 1 or randAssault == 4:
descriptionLabel.config(text=
"You've successfully counter attacked!")
elif randAssault == 2 or randAssault == 3:
descriptionLabel.config(text="You've been cut!")
elif randAssault == 5:
descriptionLabel.config(text="You've been hit!")
if predictedCounter == 'punch_L':
instructionLabel.config(text='LEFT PUNCH')
if randAssault == 0 or randAssault == 1 or randAssault == 5:
descriptionLabel.config(text=
"You've successfully counter attacked!")
elif randAssault == 2 or randAssault == 3:
descriptionLabel.config(text="You've been cut!")
elif randAssault == 4:
descriptionLabel.config(text="You've been hit!")
if predictedCounter == 'weave_R':
instructionLabel.config(text='RIGHT WEAVE')
if randAssault == 1 or randAssault == 3 or randAssault == 5:
descriptionLabel.config(text="You've successfully evaded!")
elif randAssault == 4:
descriptionLabel.config(text="You've been hit!")
elif randAssault == 2:
descriptionLabel.config(text="You've been cut!")
else:
descriptionLabel.config(text="You've been grabbed!")
if predictedCounter == 'weave_L':
instructionLabel.config(text='LEFT WEAVE')
if randAssault == 0 or randAssault == 2 or randAssault == 4:
descriptionLabel.config(text="You've successfully evaded!")
elif randAssault == 5:
descriptionLabel.config(text="You've been hit!")
elif randAssault == 3:
descriptionLabel.config(text="You've been cut!")
else:
descriptionLabel.config(text="You've been grabbed!")
if predictedCounter == 'block':
instructionLabel.config(text='BLOCK')
if randAssault == 5 or randAssault == 4:
descriptionLabel.config(text="You've successfully blocked!")
elif randAssault == 2 or randAssault == 3:
descriptionLabel.config(text="You've been cut!")
elif randAssault == 0 or randAssault == 1:
descriptionLabel.config(text="You've been grabbed!")
descriptionLabel.pack()
<|reserved_special_token_0|>
def show_frame(milliseconds):
if milliseconds > 0:
_, frame = cap.read()
frame = cv2.flip(frame, 1)
cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
img = Image.fromarray(cv2image)
imgtk = ImageTk.PhotoImage(img)
vidLabel.imgtk = imgtk
vidLabel.config(image=imgtk)
root.update()
root.after(30, show_frame, milliseconds - 30)
_, frame = cap.read()
cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
img = Image.fromarray(cv2image)
img = img.convert('RGB')
img.save('imgFile.jpeg')
if milliseconds == secondsChosen * 3000:
return ml.predict('imgFile.jpeg')
<|reserved_special_token_0|>
def runPrompt():
startButton.config(text='Next')
startButton.pack(side=LEFT)
resetButton.pack(side=RIGHT)
descriptionLabel.pack_forget()
assaultList = ['Grab from your right', 'Grab from your left',
'Blade attack from the right', 'Blade attack from the left',
'Hit from the right', 'Hit from the left']
counterList = ['parry_R', 'parry_L', 'weave_R', 'weave_L', 'punch_R',
'punch_L', 'block']
difficultyChoice = difficultyList.get(ACTIVE)
global secondsChosen
secondsChosen = 0
if difficultyChoice[0] == 'E':
secondsChosen = 6
elif difficultyChoice[0] == 'M':
secondsChosen = 3
else:
secondsChosen = 1
print(secondsChosen)
difficultyList.pack_forget()
randAssault = random.randint(0, 5)
instructionLabel.config(text=assaultList[randAssault], font=('Courier', 25)
)
vidLabel.pack()
predictedCounter = show_frame(secondsChosen * 1000)
if predictedCounter not in counterList:
predictedCounter = counterList[random.randint(0, 6)]
root.after(secondsChosen * 1200, calcSuccess, predictedCounter, randAssault
)
return 0
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def calcSuccess(predictedCounter, randAssault):
vidLabel.pack_forget()
if predictedCounter == 'parry_R':
instructionLabel.config(text='RIGHT PARRY')
if randAssault == 4 or randAssault == 2:
descriptionLabel.config(text="You've successfully parried!")
elif randAssault == 3:
descriptionLabel.config(text="You've been cut!")
elif randAssault == 5:
descriptionLabel.config(text="You've been hit!")
else:
descriptionLabel.config(text="You've been grabbed!")
if predictedCounter == 'parry_L':
instructionLabel.config(text='LEFT PARRY')
if randAssault == 5 or randAssault == 3:
descriptionLabel.config(text="You've successfully parried!")
elif randAssault == 2:
descriptionLabel.config(text="You've been cut!")
elif randAssault == 4:
descriptionLabel.config(text="You've been hit!")
else:
descriptionLabel.config(text="You've been grabbed!")
if predictedCounter == 'punch_R':
instructionLabel.config(text='RIGHT PUNCH')
if randAssault == 0 or randAssault == 1 or randAssault == 4:
descriptionLabel.config(text=
"You've successfully counter attacked!")
elif randAssault == 2 or randAssault == 3:
descriptionLabel.config(text="You've been cut!")
elif randAssault == 5:
descriptionLabel.config(text="You've been hit!")
if predictedCounter == 'punch_L':
instructionLabel.config(text='LEFT PUNCH')
if randAssault == 0 or randAssault == 1 or randAssault == 5:
descriptionLabel.config(text=
"You've successfully counter attacked!")
elif randAssault == 2 or randAssault == 3:
descriptionLabel.config(text="You've been cut!")
elif randAssault == 4:
descriptionLabel.config(text="You've been hit!")
if predictedCounter == 'weave_R':
instructionLabel.config(text='RIGHT WEAVE')
if randAssault == 1 or randAssault == 3 or randAssault == 5:
descriptionLabel.config(text="You've successfully evaded!")
elif randAssault == 4:
descriptionLabel.config(text="You've been hit!")
elif randAssault == 2:
descriptionLabel.config(text="You've been cut!")
else:
descriptionLabel.config(text="You've been grabbed!")
if predictedCounter == 'weave_L':
instructionLabel.config(text='LEFT WEAVE')
if randAssault == 0 or randAssault == 2 or randAssault == 4:
descriptionLabel.config(text="You've successfully evaded!")
elif randAssault == 5:
descriptionLabel.config(text="You've been hit!")
elif randAssault == 3:
descriptionLabel.config(text="You've been cut!")
else:
descriptionLabel.config(text="You've been grabbed!")
if predictedCounter == 'block':
instructionLabel.config(text='BLOCK')
if randAssault == 5 or randAssault == 4:
descriptionLabel.config(text="You've successfully blocked!")
elif randAssault == 2 or randAssault == 3:
descriptionLabel.config(text="You've been cut!")
elif randAssault == 0 or randAssault == 1:
descriptionLabel.config(text="You've been grabbed!")
descriptionLabel.pack()
<|reserved_special_token_0|>
root.geometry('2000x1100')
<|reserved_special_token_0|>
canvas.pack(side='top')
<|reserved_special_token_0|>
canvas.create_image(350, 100, image=logo)
<|reserved_special_token_0|>
descriptionLabel.pack(side='top')
<|reserved_special_token_0|>
instructionLabel.pack(side='top')
<|reserved_special_token_0|>
def show_frame(milliseconds):
if milliseconds > 0:
_, frame = cap.read()
frame = cv2.flip(frame, 1)
cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
img = Image.fromarray(cv2image)
imgtk = ImageTk.PhotoImage(img)
vidLabel.imgtk = imgtk
vidLabel.config(image=imgtk)
root.update()
root.after(30, show_frame, milliseconds - 30)
_, frame = cap.read()
cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
img = Image.fromarray(cv2image)
img = img.convert('RGB')
img.save('imgFile.jpeg')
if milliseconds == secondsChosen * 3000:
return ml.predict('imgFile.jpeg')
<|reserved_special_token_0|>
buttonFrame.pack(side='bottom')
<|reserved_special_token_0|>
difficultyList.insert(1, 'Easy: 6 seconds')
difficultyList.insert(2, 'Medium: 3 seconds')
difficultyList.insert(3, 'Hard: 1 second')
difficultyList.pack(side='top')
<|reserved_special_token_0|>
def runPrompt():
startButton.config(text='Next')
startButton.pack(side=LEFT)
resetButton.pack(side=RIGHT)
descriptionLabel.pack_forget()
assaultList = ['Grab from your right', 'Grab from your left',
'Blade attack from the right', 'Blade attack from the left',
'Hit from the right', 'Hit from the left']
counterList = ['parry_R', 'parry_L', 'weave_R', 'weave_L', 'punch_R',
'punch_L', 'block']
difficultyChoice = difficultyList.get(ACTIVE)
global secondsChosen
secondsChosen = 0
if difficultyChoice[0] == 'E':
secondsChosen = 6
elif difficultyChoice[0] == 'M':
secondsChosen = 3
else:
secondsChosen = 1
print(secondsChosen)
difficultyList.pack_forget()
randAssault = random.randint(0, 5)
instructionLabel.config(text=assaultList[randAssault], font=('Courier', 25)
)
vidLabel.pack()
predictedCounter = show_frame(secondsChosen * 1000)
if predictedCounter not in counterList:
predictedCounter = counterList[random.randint(0, 6)]
root.after(secondsChosen * 1200, calcSuccess, predictedCounter, randAssault
)
return 0
def reset():
resetButton.pack_forget()
startButton.config(text='Start')
startButton.pack(side=BOTTOM)
instructionLabel.config(text=instructionText, font=('Courier', 16))
descriptionLabel.config(text=descriptionText, font=('Courier', 18))
descriptionLabel.pack(side=TOP)
difficultyList.pack(side=TOP)
<|reserved_special_token_0|>
startButton.pack(side=BOTTOM)
<|reserved_special_token_0|>
root.mainloop()
cap.release()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def calcSuccess(predictedCounter, randAssault):
vidLabel.pack_forget()
if predictedCounter == 'parry_R':
instructionLabel.config(text='RIGHT PARRY')
if randAssault == 4 or randAssault == 2:
descriptionLabel.config(text="You've successfully parried!")
elif randAssault == 3:
descriptionLabel.config(text="You've been cut!")
elif randAssault == 5:
descriptionLabel.config(text="You've been hit!")
else:
descriptionLabel.config(text="You've been grabbed!")
if predictedCounter == 'parry_L':
instructionLabel.config(text='LEFT PARRY')
if randAssault == 5 or randAssault == 3:
descriptionLabel.config(text="You've successfully parried!")
elif randAssault == 2:
descriptionLabel.config(text="You've been cut!")
elif randAssault == 4:
descriptionLabel.config(text="You've been hit!")
else:
descriptionLabel.config(text="You've been grabbed!")
if predictedCounter == 'punch_R':
instructionLabel.config(text='RIGHT PUNCH')
if randAssault == 0 or randAssault == 1 or randAssault == 4:
descriptionLabel.config(text=
"You've successfully counter attacked!")
elif randAssault == 2 or randAssault == 3:
descriptionLabel.config(text="You've been cut!")
elif randAssault == 5:
descriptionLabel.config(text="You've been hit!")
if predictedCounter == 'punch_L':
instructionLabel.config(text='LEFT PUNCH')
if randAssault == 0 or randAssault == 1 or randAssault == 5:
descriptionLabel.config(text=
"You've successfully counter attacked!")
elif randAssault == 2 or randAssault == 3:
descriptionLabel.config(text="You've been cut!")
elif randAssault == 4:
descriptionLabel.config(text="You've been hit!")
if predictedCounter == 'weave_R':
instructionLabel.config(text='RIGHT WEAVE')
if randAssault == 1 or randAssault == 3 or randAssault == 5:
descriptionLabel.config(text="You've successfully evaded!")
elif randAssault == 4:
descriptionLabel.config(text="You've been hit!")
elif randAssault == 2:
descriptionLabel.config(text="You've been cut!")
else:
descriptionLabel.config(text="You've been grabbed!")
if predictedCounter == 'weave_L':
instructionLabel.config(text='LEFT WEAVE')
if randAssault == 0 or randAssault == 2 or randAssault == 4:
descriptionLabel.config(text="You've successfully evaded!")
elif randAssault == 5:
descriptionLabel.config(text="You've been hit!")
elif randAssault == 3:
descriptionLabel.config(text="You've been cut!")
else:
descriptionLabel.config(text="You've been grabbed!")
if predictedCounter == 'block':
instructionLabel.config(text='BLOCK')
if randAssault == 5 or randAssault == 4:
descriptionLabel.config(text="You've successfully blocked!")
elif randAssault == 2 or randAssault == 3:
descriptionLabel.config(text="You've been cut!")
elif randAssault == 0 or randAssault == 1:
descriptionLabel.config(text="You've been grabbed!")
descriptionLabel.pack()
cap = cv2.VideoCapture(0)
root = tk.Tk()
root.geometry('2000x1100')
ldFrame = Frame(root).pack(side='top')
canvas = Canvas(ldFrame, width=700, height=200)
canvas.pack(side='top')
pilLogo = Image.open('Logo.png')
logo = ImageTk.PhotoImage(pilLogo)
canvas.create_image(350, 100, image=logo)
descriptionText = (
'This program trains the user to respond in self defense to common physical threats.'
)
descriptionLabel = tk.Label(ldFrame, justify='center', padx=10, font=(
'Courier', 18), wraplength=1900, text=descriptionText)
descriptionLabel.pack(side='top')
centerFrame = Frame(root).pack()
countdownLabel = tk.Label(centerFrame, justify='center', font=('Courier',
20), text='')
instructionText = (
'In this training system, you will be prompted with how an aggressor is approaching you. You may select a difficulty for this system by choosing how much time you would like to be allowed to react. Based on your counter attack, the system will tell you if the attacker has been [Narrowly Avoided], [Stunned], or [Subdued] based on the quality of your reaction. Your success rate will be tracked at the bottom of the screen. Press the [Start] button to begin and the [Stop] button to end the session.'
)
instructionLabel = tk.Label(centerFrame, justify='center', padx=50, pady=50,
font=('Courier', 16), wraplength=1800, text=instructionText)
instructionLabel.pack(side='top')
vidLabel = Label(root)
def show_frame(milliseconds):
if milliseconds > 0:
_, frame = cap.read()
frame = cv2.flip(frame, 1)
cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
img = Image.fromarray(cv2image)
imgtk = ImageTk.PhotoImage(img)
vidLabel.imgtk = imgtk
vidLabel.config(image=imgtk)
root.update()
root.after(30, show_frame, milliseconds - 30)
_, frame = cap.read()
cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
img = Image.fromarray(cv2image)
img = img.convert('RGB')
img.save('imgFile.jpeg')
if milliseconds == secondsChosen * 3000:
return ml.predict('imgFile.jpeg')
buttonFrame = Frame(root)
buttonFrame.pack(side='bottom')
difficultyList = Listbox(buttonFrame, selectmode=SINGLE, height=3, font=(
'Courier', 16))
difficultyList.insert(1, 'Easy: 6 seconds')
difficultyList.insert(2, 'Medium: 3 seconds')
difficultyList.insert(3, 'Hard: 1 second')
difficultyList.pack(side='top')
cycling = True
def runPrompt():
startButton.config(text='Next')
startButton.pack(side=LEFT)
resetButton.pack(side=RIGHT)
descriptionLabel.pack_forget()
assaultList = ['Grab from your right', 'Grab from your left',
'Blade attack from the right', 'Blade attack from the left',
'Hit from the right', 'Hit from the left']
counterList = ['parry_R', 'parry_L', 'weave_R', 'weave_L', 'punch_R',
'punch_L', 'block']
difficultyChoice = difficultyList.get(ACTIVE)
global secondsChosen
secondsChosen = 0
if difficultyChoice[0] == 'E':
secondsChosen = 6
elif difficultyChoice[0] == 'M':
secondsChosen = 3
else:
secondsChosen = 1
print(secondsChosen)
difficultyList.pack_forget()
randAssault = random.randint(0, 5)
instructionLabel.config(text=assaultList[randAssault], font=('Courier', 25)
)
vidLabel.pack()
predictedCounter = show_frame(secondsChosen * 1000)
if predictedCounter not in counterList:
predictedCounter = counterList[random.randint(0, 6)]
root.after(secondsChosen * 1200, calcSuccess, predictedCounter, randAssault
)
return 0
def reset():
resetButton.pack_forget()
startButton.config(text='Start')
startButton.pack(side=BOTTOM)
instructionLabel.config(text=instructionText, font=('Courier', 16))
descriptionLabel.config(text=descriptionText, font=('Courier', 18))
descriptionLabel.pack(side=TOP)
difficultyList.pack(side=TOP)
startButton = Button(buttonFrame, bd=6, padx=20, pady=20, font=('Courier',
16), text='Start', fg='green', command=runPrompt)
startButton.pack(side=BOTTOM)
resetButton = Button(buttonFrame, bd=6, padx=20, pady=20, font=('Courier',
16), text='Reset', fg='red', command=reset)
root.mainloop()
cap.release()
<|reserved_special_token_1|>
import tkinter as tk
from tkinter import *
from PIL import ImageTk
from PIL import Image
import cv2
import numpy as np
from statistics import mode
import time
import random
import predict as ml
def calcSuccess(predictedCounter, randAssault):
vidLabel.pack_forget()
if predictedCounter == 'parry_R':
instructionLabel.config(text='RIGHT PARRY')
if randAssault == 4 or randAssault == 2:
descriptionLabel.config(text="You've successfully parried!")
elif randAssault == 3:
descriptionLabel.config(text="You've been cut!")
elif randAssault == 5:
descriptionLabel.config(text="You've been hit!")
else:
descriptionLabel.config(text="You've been grabbed!")
if predictedCounter == 'parry_L':
instructionLabel.config(text='LEFT PARRY')
if randAssault == 5 or randAssault == 3:
descriptionLabel.config(text="You've successfully parried!")
elif randAssault == 2:
descriptionLabel.config(text="You've been cut!")
elif randAssault == 4:
descriptionLabel.config(text="You've been hit!")
else:
descriptionLabel.config(text="You've been grabbed!")
if predictedCounter == 'punch_R':
instructionLabel.config(text='RIGHT PUNCH')
if randAssault == 0 or randAssault == 1 or randAssault == 4:
descriptionLabel.config(text=
"You've successfully counter attacked!")
elif randAssault == 2 or randAssault == 3:
descriptionLabel.config(text="You've been cut!")
elif randAssault == 5:
descriptionLabel.config(text="You've been hit!")
if predictedCounter == 'punch_L':
instructionLabel.config(text='LEFT PUNCH')
if randAssault == 0 or randAssault == 1 or randAssault == 5:
descriptionLabel.config(text=
"You've successfully counter attacked!")
elif randAssault == 2 or randAssault == 3:
descriptionLabel.config(text="You've been cut!")
elif randAssault == 4:
descriptionLabel.config(text="You've been hit!")
if predictedCounter == 'weave_R':
instructionLabel.config(text='RIGHT WEAVE')
if randAssault == 1 or randAssault == 3 or randAssault == 5:
descriptionLabel.config(text="You've successfully evaded!")
elif randAssault == 4:
descriptionLabel.config(text="You've been hit!")
elif randAssault == 2:
descriptionLabel.config(text="You've been cut!")
else:
descriptionLabel.config(text="You've been grabbed!")
if predictedCounter == 'weave_L':
instructionLabel.config(text='LEFT WEAVE')
if randAssault == 0 or randAssault == 2 or randAssault == 4:
descriptionLabel.config(text="You've successfully evaded!")
elif randAssault == 5:
descriptionLabel.config(text="You've been hit!")
elif randAssault == 3:
descriptionLabel.config(text="You've been cut!")
else:
descriptionLabel.config(text="You've been grabbed!")
if predictedCounter == 'block':
instructionLabel.config(text='BLOCK')
if randAssault == 5 or randAssault == 4:
descriptionLabel.config(text="You've successfully blocked!")
elif randAssault == 2 or randAssault == 3:
descriptionLabel.config(text="You've been cut!")
elif randAssault == 0 or randAssault == 1:
descriptionLabel.config(text="You've been grabbed!")
descriptionLabel.pack()
cap = cv2.VideoCapture(0)
root = tk.Tk()
root.geometry('2000x1100')
ldFrame = Frame(root).pack(side='top')
canvas = Canvas(ldFrame, width=700, height=200)
canvas.pack(side='top')
pilLogo = Image.open('Logo.png')
logo = ImageTk.PhotoImage(pilLogo)
canvas.create_image(350, 100, image=logo)
descriptionText = (
'This program trains the user to respond in self defense to common physical threats.'
)
descriptionLabel = tk.Label(ldFrame, justify='center', padx=10, font=(
'Courier', 18), wraplength=1900, text=descriptionText)
descriptionLabel.pack(side='top')
centerFrame = Frame(root).pack()
countdownLabel = tk.Label(centerFrame, justify='center', font=('Courier',
20), text='')
instructionText = (
'In this training system, you will be prompted with how an aggressor is approaching you. You may select a difficulty for this system by choosing how much time you would like to be allowed to react. Based on your counter attack, the system will tell you if the attacker has been [Narrowly Avoided], [Stunned], or [Subdued] based on the quality of your reaction. Your success rate will be tracked at the bottom of the screen. Press the [Start] button to begin and the [Stop] button to end the session.'
)
instructionLabel = tk.Label(centerFrame, justify='center', padx=50, pady=50,
font=('Courier', 16), wraplength=1800, text=instructionText)
instructionLabel.pack(side='top')
vidLabel = Label(root)
def show_frame(milliseconds):
if milliseconds > 0:
_, frame = cap.read()
frame = cv2.flip(frame, 1)
cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
img = Image.fromarray(cv2image)
imgtk = ImageTk.PhotoImage(img)
vidLabel.imgtk = imgtk
vidLabel.config(image=imgtk)
root.update()
root.after(30, show_frame, milliseconds - 30)
_, frame = cap.read()
cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
img = Image.fromarray(cv2image)
img = img.convert('RGB')
img.save('imgFile.jpeg')
if milliseconds == secondsChosen * 3000:
return ml.predict('imgFile.jpeg')
buttonFrame = Frame(root)
buttonFrame.pack(side='bottom')
difficultyList = Listbox(buttonFrame, selectmode=SINGLE, height=3, font=(
'Courier', 16))
difficultyList.insert(1, 'Easy: 6 seconds')
difficultyList.insert(2, 'Medium: 3 seconds')
difficultyList.insert(3, 'Hard: 1 second')
difficultyList.pack(side='top')
cycling = True
def runPrompt():
startButton.config(text='Next')
startButton.pack(side=LEFT)
resetButton.pack(side=RIGHT)
descriptionLabel.pack_forget()
assaultList = ['Grab from your right', 'Grab from your left',
'Blade attack from the right', 'Blade attack from the left',
'Hit from the right', 'Hit from the left']
counterList = ['parry_R', 'parry_L', 'weave_R', 'weave_L', 'punch_R',
'punch_L', 'block']
difficultyChoice = difficultyList.get(ACTIVE)
global secondsChosen
secondsChosen = 0
if difficultyChoice[0] == 'E':
secondsChosen = 6
elif difficultyChoice[0] == 'M':
secondsChosen = 3
else:
secondsChosen = 1
print(secondsChosen)
difficultyList.pack_forget()
randAssault = random.randint(0, 5)
instructionLabel.config(text=assaultList[randAssault], font=('Courier', 25)
)
vidLabel.pack()
predictedCounter = show_frame(secondsChosen * 1000)
if predictedCounter not in counterList:
predictedCounter = counterList[random.randint(0, 6)]
root.after(secondsChosen * 1200, calcSuccess, predictedCounter, randAssault
)
return 0
def reset():
resetButton.pack_forget()
startButton.config(text='Start')
startButton.pack(side=BOTTOM)
instructionLabel.config(text=instructionText, font=('Courier', 16))
descriptionLabel.config(text=descriptionText, font=('Courier', 18))
descriptionLabel.pack(side=TOP)
difficultyList.pack(side=TOP)
startButton = Button(buttonFrame, bd=6, padx=20, pady=20, font=('Courier',
16), text='Start', fg='green', command=runPrompt)
startButton.pack(side=BOTTOM)
resetButton = Button(buttonFrame, bd=6, padx=20, pady=20, font=('Courier',
16), text='Reset', fg='red', command=reset)
root.mainloop()
cap.release()
<|reserved_special_token_1|>
#THIS IS PYTHON3
import tkinter as tk
from tkinter import *
from PIL import ImageTk
from PIL import Image #to handle non-gif image formats
import cv2
import numpy as np
from statistics import mode
import time
import random
import predict as ml
def calcSuccess(predictedCounter, randAssault):
vidLabel.pack_forget()
if predictedCounter == "parry_R":
instructionLabel.config(text="RIGHT PARRY")
if randAssault == 4 or randAssault == 2:
descriptionLabel.config(text="You've successfully parried!")
elif randAssault == 3:
descriptionLabel.config(text="You've been cut!")
elif randAssault == 5:
descriptionLabel.config(text="You've been hit!")
else:
descriptionLabel.config(text="You've been grabbed!")
if predictedCounter == "parry_L":
instructionLabel.config(text="LEFT PARRY")
if randAssault == 5 or randAssault == 3:
descriptionLabel.config(text="You've successfully parried!")
elif randAssault == 2:
descriptionLabel.config(text="You've been cut!")
elif randAssault == 4:
descriptionLabel.config(text="You've been hit!")
else:
descriptionLabel.config(text="You've been grabbed!")
if predictedCounter == "punch_R":
instructionLabel.config(text="RIGHT PUNCH")
if randAssault == 0 or randAssault == 1 or randAssault == 4:
descriptionLabel.config(text="You've successfully counter attacked!")
elif randAssault == 2 or randAssault == 3:
descriptionLabel.config(text="You've been cut!")
elif randAssault == 5:
descriptionLabel.config(text="You've been hit!")
if predictedCounter == "punch_L":
instructionLabel.config(text="LEFT PUNCH")
if randAssault == 0 or randAssault == 1 or randAssault == 5:
descriptionLabel.config(text="You've successfully counter attacked!")
elif randAssault == 2 or randAssault == 3:
descriptionLabel.config(text="You've been cut!")
elif randAssault == 4:
descriptionLabel.config(text="You've been hit!")
if predictedCounter == "weave_R":
instructionLabel.config(text="RIGHT WEAVE")
if randAssault == 1 or randAssault == 3 or randAssault == 5:
descriptionLabel.config(text="You've successfully evaded!")
elif randAssault == 4:
descriptionLabel.config(text="You've been hit!")
elif randAssault == 2:
descriptionLabel.config(text="You've been cut!")
else:
descriptionLabel.config(text="You've been grabbed!")
if predictedCounter == "weave_L":
instructionLabel.config(text="LEFT WEAVE")
if randAssault == 0 or randAssault == 2 or randAssault == 4:
descriptionLabel.config(text="You've successfully evaded!")
elif randAssault == 5:
descriptionLabel.config(text="You've been hit!")
elif randAssault == 3:
descriptionLabel.config(text="You've been cut!")
else:
descriptionLabel.config(text="You've been grabbed!")
if predictedCounter == "block":
instructionLabel.config(text="BLOCK")
if randAssault == 5 or randAssault == 4:
descriptionLabel.config(text="You've successfully blocked!")
elif randAssault == 2 or randAssault == 3:
descriptionLabel.config(text="You've been cut!")
elif randAssault == 0 or randAssault == 1:
descriptionLabel.config(text="You've been grabbed!")
descriptionLabel.pack()
cap = cv2.VideoCapture(0)
root = tk.Tk() #initialize tkinter by making the Tk root widget -- it consists of a window with a title bar and decoration provided by the window manager. The root widget must be made first, and there can be only one.
root.geometry("2000x1100")
ldFrame = Frame(root).pack(side="top") #frame to hold logo and description
canvas = Canvas(ldFrame, width=700, height=200)
canvas.pack(side="top")
#open image with pil image because PhotoImage only takes gif
pilLogo = Image.open("Logo.png")
logo = ImageTk.PhotoImage(pilLogo) #makes PhotoImage from pil image
canvas.create_image(350, 100, image=logo) #adds PhotoImage to Canvas
#make basic description label from text string on the logo description frame
descriptionText = """This program trains the user to respond in self defense to common physical threats."""
descriptionLabel = tk.Label(ldFrame, justify="center", padx=10, font=("Courier", 18), wraplength=1900, text=descriptionText)
descriptionLabel.pack(side="top")
#make center frame that will show instructions initially and then have "assaulter" prompts and live video
centerFrame = Frame(root).pack()
countdownLabel = tk.Label(centerFrame, justify="center", font=("Courier", 20), text="") #invisible for now because not packed
instructionText = """In this training system, you will be prompted with how an aggressor is approaching you. You may select a difficulty for this system by choosing how much time you would like to be allowed to react. Based on your counter attack, the system will tell you if the attacker has been [Narrowly Avoided], [Stunned], or [Subdued] based on the quality of your reaction. Your success rate will be tracked at the bottom of the screen. Press the [Start] button to begin and the [Stop] button to end the session."""
instructionLabel = tk.Label(centerFrame, justify="center", padx=50, pady=50, font=("Courier", 16), wraplength=1800, text=instructionText)
instructionLabel.pack(side="top")
#setup to capture video frames
vidLabel = Label(root)
def show_frame(milliseconds):
if milliseconds > 0:
#global predictionArr
_, frame = cap.read()
#predictionArr.append(predict.get_prediction(frame, "ace-connection-236822", "ICN2459521650166688930"))
        frame = cv2.flip(frame, 1) #horizontally flip the frame so the preview behaves like a mirror
        cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA) #convert BGR to RGBA so the colors display correctly in tkinter
img = Image.fromarray(cv2image)
imgtk = ImageTk.PhotoImage(img)
vidLabel.imgtk = imgtk
vidLabel.config(image=imgtk)
root.update()
root.after(30, show_frame, (milliseconds-30))
_, frame = cap.read()
cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
img = Image.fromarray(cv2image)
img = img.convert("RGB")
img.save("imgFile.jpeg")
if milliseconds == secondsChosen*3000:
return ml.predict("imgFile.jpeg")
#make bottom frame that holds the buttons
buttonFrame = Frame(root)
buttonFrame.pack(side="bottom")
difficultyList = Listbox(buttonFrame, selectmode=SINGLE, height=3, font=("Courier", 16))
difficultyList.insert(1, "Easy: 6 seconds")
difficultyList.insert(2, "Medium: 3 seconds")
difficultyList.insert(3, "Hard: 1 seconds")
difficultyList.pack(side="top")
cycling = True
def runPrompt():
startButton.config(text="Next")
startButton.pack(side=LEFT)
resetButton.pack(side=RIGHT)
descriptionLabel.pack_forget()
assaultList = ["Grab from your right", "Grab from your left", "Blade attack from the right", "Blade attack from the left", "Hit from the right", "Hit from the left"]
counterList = ["parry_R", "parry_L", "weave_R", "weave_L", "punch_R", "punch_L", "block"]
difficultyChoice = (difficultyList.get(ACTIVE))
global secondsChosen
secondsChosen = 0
if difficultyChoice[0] == "E":
secondsChosen = 6
elif difficultyChoice[0] == "M":
secondsChosen = 3
else:
secondsChosen = 1
print(secondsChosen)
difficultyList.pack_forget()
randAssault = random.randint(0, 5)
instructionLabel.config(text=assaultList[randAssault], font=("Courier", 25))
vidLabel.pack()
predictedCounter = show_frame(secondsChosen*1000)
if predictedCounter not in counterList:
predictedCounter = counterList[random.randint(0, 6)]
root.after(secondsChosen*1200, calcSuccess, predictedCounter, randAssault)
return 0
def reset():
resetButton.pack_forget()
startButton.config(text="Start")
startButton.pack(side=BOTTOM)
instructionLabel.config(text=instructionText, font=("Courier", 16))
descriptionLabel.config(text=descriptionText, font=("Courier", 18))
descriptionLabel.pack(side=TOP)
difficultyList.pack(side=TOP)
startButton = Button(buttonFrame, bd=6, padx=20, pady=20,font=("Courier", 16), text="Start", fg="green", command=runPrompt)
startButton.pack(side=BOTTOM)
resetButton = Button(buttonFrame, bd=6, padx=20, pady=20, font=("Courier", 16), text="Reset", fg="red", command=reset)
root.mainloop()
cap.release()
|
flexible
|
{
"blob_id": "8cf6a9243182a4f6b68199a8967e06790396dc10",
"index": 5967,
"step-1": "<mask token>\n\n\ndef calcSuccess(predictedCounter, randAssault):\n vidLabel.pack_forget()\n if predictedCounter == 'parry_R':\n instructionLabel.config(text='RIGHT PARRY')\n if randAssault == 4 or randAssault == 2:\n descriptionLabel.config(text=\"You've successfully parried!\")\n elif randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 5:\n descriptionLabel.config(text=\"You've been hit!\")\n else:\n descriptionLabel.config(text=\"You've been grabbed!\")\n if predictedCounter == 'parry_L':\n instructionLabel.config(text='LEFT PARRY')\n if randAssault == 5 or randAssault == 3:\n descriptionLabel.config(text=\"You've successfully parried!\")\n elif randAssault == 2:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 4:\n descriptionLabel.config(text=\"You've been hit!\")\n else:\n descriptionLabel.config(text=\"You've been grabbed!\")\n if predictedCounter == 'punch_R':\n instructionLabel.config(text='RIGHT PUNCH')\n if randAssault == 0 or randAssault == 1 or randAssault == 4:\n descriptionLabel.config(text=\n \"You've successfully counter attacked!\")\n elif randAssault == 2 or randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 5:\n descriptionLabel.config(text=\"You've been hit!\")\n if predictedCounter == 'punch_L':\n instructionLabel.config(text='LEFT PUNCH')\n if randAssault == 0 or randAssault == 1 or randAssault == 5:\n descriptionLabel.config(text=\n \"You've successfully counter attacked!\")\n elif randAssault == 2 or randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 4:\n descriptionLabel.config(text=\"You've been hit!\")\n if predictedCounter == 'weave_R':\n instructionLabel.config(text='RIGHT WEAVE')\n if randAssault == 1 or randAssault == 3 or randAssault == 5:\n descriptionLabel.config(text=\"You've successfully evaded!\")\n elif randAssault == 4:\n descriptionLabel.config(text=\"You've been hit!\")\n elif randAssault == 2:\n descriptionLabel.config(text=\"You've been cut!\")\n else:\n descriptionLabel.config(text=\"You've been grabbed!\")\n if predictedCounter == 'weave_L':\n instructionLabel.config(text='LEFT WEAVE')\n if randAssault == 0 or randAssault == 2 or randAssault == 4:\n descriptionLabel.config(text=\"You've successfully evaded!\")\n elif randAssault == 5:\n descriptionLabel.config(text=\"You've been hit!\")\n elif randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n else:\n descriptionLabel.config(text=\"You've been grabbed!\")\n if predictedCounter == 'block':\n instructionLabel.config(text='BLOCK')\n if randAssault == 5 or randAssault == 4:\n descriptionLabel.config(text=\"You've successfully blocked!\")\n elif randAssault == 2 or randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 0 or randAssault == 1:\n descriptionLabel.config(text=\"You've been grabbed!\")\n descriptionLabel.pack()\n\n\n<mask token>\n\n\ndef show_frame(milliseconds):\n if milliseconds > 0:\n _, frame = cap.read()\n frame = cv2.flip(frame, 1)\n cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)\n img = Image.fromarray(cv2image)\n imgtk = ImageTk.PhotoImage(img)\n vidLabel.imgtk = imgtk\n vidLabel.config(image=imgtk)\n root.update()\n root.after(30, show_frame, milliseconds - 30)\n _, frame = cap.read()\n cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)\n img = Image.fromarray(cv2image)\n img = img.convert('RGB')\n img.save('imgFile.jpeg')\n if milliseconds == secondsChosen 
* 3000:\n return ml.predict('imgFile.jpeg')\n\n\n<mask token>\n\n\ndef runPrompt():\n startButton.config(text='Next')\n startButton.pack(side=LEFT)\n resetButton.pack(side=RIGHT)\n descriptionLabel.pack_forget()\n assaultList = ['Grab from your right', 'Grab from your left',\n 'Blade attack from the right', 'Blade attack from the left',\n 'Hit from the right', 'Hit from the left']\n counterList = ['parry_R', 'parry_L', 'weave_R', 'weave_L', 'punch_R',\n 'punch_L', 'block']\n difficultyChoice = difficultyList.get(ACTIVE)\n global secondsChosen\n secondsChosen = 0\n if difficultyChoice[0] == 'E':\n secondsChosen = 6\n elif difficultyChoice[0] == 'M':\n secondsChosen = 3\n else:\n secondsChosen = 1\n print(secondsChosen)\n difficultyList.pack_forget()\n randAssault = random.randint(0, 5)\n instructionLabel.config(text=assaultList[randAssault], font=('Courier', 25)\n )\n vidLabel.pack()\n predictedCounter = show_frame(secondsChosen * 1000)\n if predictedCounter not in counterList:\n predictedCounter = counterList[random.randint(0, 6)]\n root.after(secondsChosen * 1200, calcSuccess, predictedCounter, randAssault\n )\n return 0\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef calcSuccess(predictedCounter, randAssault):\n vidLabel.pack_forget()\n if predictedCounter == 'parry_R':\n instructionLabel.config(text='RIGHT PARRY')\n if randAssault == 4 or randAssault == 2:\n descriptionLabel.config(text=\"You've successfully parried!\")\n elif randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 5:\n descriptionLabel.config(text=\"You've been hit!\")\n else:\n descriptionLabel.config(text=\"You've been grabbed!\")\n if predictedCounter == 'parry_L':\n instructionLabel.config(text='LEFT PARRY')\n if randAssault == 5 or randAssault == 3:\n descriptionLabel.config(text=\"You've successfully parried!\")\n elif randAssault == 2:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 4:\n descriptionLabel.config(text=\"You've been hit!\")\n else:\n descriptionLabel.config(text=\"You've been grabbed!\")\n if predictedCounter == 'punch_R':\n instructionLabel.config(text='RIGHT PUNCH')\n if randAssault == 0 or randAssault == 1 or randAssault == 4:\n descriptionLabel.config(text=\n \"You've successfully counter attacked!\")\n elif randAssault == 2 or randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 5:\n descriptionLabel.config(text=\"You've been hit!\")\n if predictedCounter == 'punch_L':\n instructionLabel.config(text='LEFT PUNCH')\n if randAssault == 0 or randAssault == 1 or randAssault == 5:\n descriptionLabel.config(text=\n \"You've successfully counter attacked!\")\n elif randAssault == 2 or randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 4:\n descriptionLabel.config(text=\"You've been hit!\")\n if predictedCounter == 'weave_R':\n instructionLabel.config(text='RIGHT WEAVE')\n if randAssault == 1 or randAssault == 3 or randAssault == 5:\n descriptionLabel.config(text=\"You've successfully evaded!\")\n elif randAssault == 4:\n descriptionLabel.config(text=\"You've been hit!\")\n elif randAssault == 2:\n descriptionLabel.config(text=\"You've been cut!\")\n else:\n descriptionLabel.config(text=\"You've been grabbed!\")\n if predictedCounter == 'weave_L':\n instructionLabel.config(text='LEFT WEAVE')\n if randAssault == 0 or randAssault == 2 or randAssault == 4:\n descriptionLabel.config(text=\"You've successfully evaded!\")\n elif randAssault == 5:\n descriptionLabel.config(text=\"You've been hit!\")\n elif randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n else:\n descriptionLabel.config(text=\"You've been grabbed!\")\n if predictedCounter == 'block':\n instructionLabel.config(text='BLOCK')\n if randAssault == 5 or randAssault == 4:\n descriptionLabel.config(text=\"You've successfully blocked!\")\n elif randAssault == 2 or randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 0 or randAssault == 1:\n descriptionLabel.config(text=\"You've been grabbed!\")\n descriptionLabel.pack()\n\n\n<mask token>\nroot.geometry('2000x1100')\n<mask token>\ncanvas.pack(side='top')\n<mask token>\ncanvas.create_image(350, 100, image=logo)\n<mask token>\ndescriptionLabel.pack(side='top')\n<mask token>\ninstructionLabel.pack(side='top')\n<mask token>\n\n\ndef show_frame(milliseconds):\n if milliseconds > 0:\n _, frame = cap.read()\n frame = cv2.flip(frame, 1)\n cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)\n img = Image.fromarray(cv2image)\n imgtk = ImageTk.PhotoImage(img)\n vidLabel.imgtk = imgtk\n vidLabel.config(image=imgtk)\n root.update()\n 
root.after(30, show_frame, milliseconds - 30)\n _, frame = cap.read()\n cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)\n img = Image.fromarray(cv2image)\n img = img.convert('RGB')\n img.save('imgFile.jpeg')\n if milliseconds == secondsChosen * 3000:\n return ml.predict('imgFile.jpeg')\n\n\n<mask token>\nbuttonFrame.pack(side='bottom')\n<mask token>\ndifficultyList.insert(1, 'Easy: 6 seconds')\ndifficultyList.insert(2, 'Medium: 3 seconds')\ndifficultyList.insert(3, 'Hard: 1 seconds')\ndifficultyList.pack(side='top')\n<mask token>\n\n\ndef runPrompt():\n startButton.config(text='Next')\n startButton.pack(side=LEFT)\n resetButton.pack(side=RIGHT)\n descriptionLabel.pack_forget()\n assaultList = ['Grab from your right', 'Grab from your left',\n 'Blade attack from the right', 'Blade attack from the left',\n 'Hit from the right', 'Hit from the left']\n counterList = ['parry_R', 'parry_L', 'weave_R', 'weave_L', 'punch_R',\n 'punch_L', 'block']\n difficultyChoice = difficultyList.get(ACTIVE)\n global secondsChosen\n secondsChosen = 0\n if difficultyChoice[0] == 'E':\n secondsChosen = 6\n elif difficultyChoice[0] == 'M':\n secondsChosen = 3\n else:\n secondsChosen = 1\n print(secondsChosen)\n difficultyList.pack_forget()\n randAssault = random.randint(0, 5)\n instructionLabel.config(text=assaultList[randAssault], font=('Courier', 25)\n )\n vidLabel.pack()\n predictedCounter = show_frame(secondsChosen * 1000)\n if predictedCounter not in counterList:\n predictedCounter = counterList[random.randint(0, 6)]\n root.after(secondsChosen * 1200, calcSuccess, predictedCounter, randAssault\n )\n return 0\n\n\ndef reset():\n resetButton.pack_forget()\n startButton.config(text='Start')\n startButton.pack(side=BOTTOM)\n instructionLabel.config(text=instructionText, font=('Courier', 16))\n descriptionLabel.config(text=descriptionText, font=('Courier', 18))\n descriptionLabel.pack(side=TOP)\n difficultyList.pack(side=TOP)\n\n\n<mask token>\nstartButton.pack(side=BOTTOM)\n<mask token>\nroot.mainloop()\ncap.release()\n",
"step-3": "<mask token>\n\n\ndef calcSuccess(predictedCounter, randAssault):\n vidLabel.pack_forget()\n if predictedCounter == 'parry_R':\n instructionLabel.config(text='RIGHT PARRY')\n if randAssault == 4 or randAssault == 2:\n descriptionLabel.config(text=\"You've successfully parried!\")\n elif randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 5:\n descriptionLabel.config(text=\"You've been hit!\")\n else:\n descriptionLabel.config(text=\"You've been grabbed!\")\n if predictedCounter == 'parry_L':\n instructionLabel.config(text='LEFT PARRY')\n if randAssault == 5 or randAssault == 3:\n descriptionLabel.config(text=\"You've successfully parried!\")\n elif randAssault == 2:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 4:\n descriptionLabel.config(text=\"You've been hit!\")\n else:\n descriptionLabel.config(text=\"You've been grabbed!\")\n if predictedCounter == 'punch_R':\n instructionLabel.config(text='RIGHT PUNCH')\n if randAssault == 0 or randAssault == 1 or randAssault == 4:\n descriptionLabel.config(text=\n \"You've successfully counter attacked!\")\n elif randAssault == 2 or randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 5:\n descriptionLabel.config(text=\"You've been hit!\")\n if predictedCounter == 'punch_L':\n instructionLabel.config(text='LEFT PUNCH')\n if randAssault == 0 or randAssault == 1 or randAssault == 5:\n descriptionLabel.config(text=\n \"You've successfully counter attacked!\")\n elif randAssault == 2 or randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 4:\n descriptionLabel.config(text=\"You've been hit!\")\n if predictedCounter == 'weave_R':\n instructionLabel.config(text='RIGHT WEAVE')\n if randAssault == 1 or randAssault == 3 or randAssault == 5:\n descriptionLabel.config(text=\"You've successfully evaded!\")\n elif randAssault == 4:\n descriptionLabel.config(text=\"You've been hit!\")\n elif randAssault == 2:\n descriptionLabel.config(text=\"You've been cut!\")\n else:\n descriptionLabel.config(text=\"You've been grabbed!\")\n if predictedCounter == 'weave_L':\n instructionLabel.config(text='LEFT WEAVE')\n if randAssault == 0 or randAssault == 2 or randAssault == 4:\n descriptionLabel.config(text=\"You've successfully evaded!\")\n elif randAssault == 5:\n descriptionLabel.config(text=\"You've been hit!\")\n elif randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n else:\n descriptionLabel.config(text=\"You've been grabbed!\")\n if predictedCounter == 'block':\n instructionLabel.config(text='BLOCK')\n if randAssault == 5 or randAssault == 4:\n descriptionLabel.config(text=\"You've successfully blocked!\")\n elif randAssault == 2 or randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 0 or randAssault == 1:\n descriptionLabel.config(text=\"You've been grabbed!\")\n descriptionLabel.pack()\n\n\ncap = cv2.VideoCapture(0)\nroot = tk.Tk()\nroot.geometry('2000x1100')\nldFrame = Frame(root).pack(side='top')\ncanvas = Canvas(ldFrame, width=700, height=200)\ncanvas.pack(side='top')\npilLogo = Image.open('Logo.png')\nlogo = ImageTk.PhotoImage(pilLogo)\ncanvas.create_image(350, 100, image=logo)\ndescriptionText = (\n 'This program trains the user to respond in self defense to common physical threats.'\n )\ndescriptionLabel = tk.Label(ldFrame, justify='center', padx=10, font=(\n 'Courier', 18), wraplength=1900, 
text=descriptionText)\ndescriptionLabel.pack(side='top')\ncenterFrame = Frame(root).pack()\ncountdownLabel = tk.Label(centerFrame, justify='center', font=('Courier', \n 20), text='')\ninstructionText = (\n 'In this training system, you will be prompted with how an aggressor is approaching you. You may select a difficulty for this system by choosing how much time you would like to be allowed to react. Based on your counter attack, the system will tell you if the attacker has been [Narrowly Avoided], [Stunned], or [Subdued] based on the quality of your reaction. Your success rate will be tracked at the bottom of the screen. Press the [Start] button to begin and the [Stop] button to end the session.'\n )\ninstructionLabel = tk.Label(centerFrame, justify='center', padx=50, pady=50,\n font=('Courier', 16), wraplength=1800, text=instructionText)\ninstructionLabel.pack(side='top')\nvidLabel = Label(root)\n\n\ndef show_frame(milliseconds):\n if milliseconds > 0:\n _, frame = cap.read()\n frame = cv2.flip(frame, 1)\n cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)\n img = Image.fromarray(cv2image)\n imgtk = ImageTk.PhotoImage(img)\n vidLabel.imgtk = imgtk\n vidLabel.config(image=imgtk)\n root.update()\n root.after(30, show_frame, milliseconds - 30)\n _, frame = cap.read()\n cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)\n img = Image.fromarray(cv2image)\n img = img.convert('RGB')\n img.save('imgFile.jpeg')\n if milliseconds == secondsChosen * 3000:\n return ml.predict('imgFile.jpeg')\n\n\nbuttonFrame = Frame(root)\nbuttonFrame.pack(side='bottom')\ndifficultyList = Listbox(buttonFrame, selectmode=SINGLE, height=3, font=(\n 'Courier', 16))\ndifficultyList.insert(1, 'Easy: 6 seconds')\ndifficultyList.insert(2, 'Medium: 3 seconds')\ndifficultyList.insert(3, 'Hard: 1 seconds')\ndifficultyList.pack(side='top')\ncycling = True\n\n\ndef runPrompt():\n startButton.config(text='Next')\n startButton.pack(side=LEFT)\n resetButton.pack(side=RIGHT)\n descriptionLabel.pack_forget()\n assaultList = ['Grab from your right', 'Grab from your left',\n 'Blade attack from the right', 'Blade attack from the left',\n 'Hit from the right', 'Hit from the left']\n counterList = ['parry_R', 'parry_L', 'weave_R', 'weave_L', 'punch_R',\n 'punch_L', 'block']\n difficultyChoice = difficultyList.get(ACTIVE)\n global secondsChosen\n secondsChosen = 0\n if difficultyChoice[0] == 'E':\n secondsChosen = 6\n elif difficultyChoice[0] == 'M':\n secondsChosen = 3\n else:\n secondsChosen = 1\n print(secondsChosen)\n difficultyList.pack_forget()\n randAssault = random.randint(0, 5)\n instructionLabel.config(text=assaultList[randAssault], font=('Courier', 25)\n )\n vidLabel.pack()\n predictedCounter = show_frame(secondsChosen * 1000)\n if predictedCounter not in counterList:\n predictedCounter = counterList[random.randint(0, 6)]\n root.after(secondsChosen * 1200, calcSuccess, predictedCounter, randAssault\n )\n return 0\n\n\ndef reset():\n resetButton.pack_forget()\n startButton.config(text='Start')\n startButton.pack(side=BOTTOM)\n instructionLabel.config(text=instructionText, font=('Courier', 16))\n descriptionLabel.config(text=descriptionText, font=('Courier', 18))\n descriptionLabel.pack(side=TOP)\n difficultyList.pack(side=TOP)\n\n\nstartButton = Button(buttonFrame, bd=6, padx=20, pady=20, font=('Courier', \n 16), text='Start', fg='green', command=runPrompt)\nstartButton.pack(side=BOTTOM)\nresetButton = Button(buttonFrame, bd=6, padx=20, pady=20, font=('Courier', \n 16), text='Reset', fg='red', 
command=reset)\nroot.mainloop()\ncap.release()\n",
"step-4": "import tkinter as tk\nfrom tkinter import *\nfrom PIL import ImageTk\nfrom PIL import Image\nimport cv2\nimport numpy as np\nfrom statistics import mode\nimport time\nimport random\nimport predict as ml\n\n\ndef calcSuccess(predictedCounter, randAssault):\n vidLabel.pack_forget()\n if predictedCounter == 'parry_R':\n instructionLabel.config(text='RIGHT PARRY')\n if randAssault == 4 or randAssault == 2:\n descriptionLabel.config(text=\"You've successfully parried!\")\n elif randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 5:\n descriptionLabel.config(text=\"You've been hit!\")\n else:\n descriptionLabel.config(text=\"You've been grabbed!\")\n if predictedCounter == 'parry_L':\n instructionLabel.config(text='LEFT PARRY')\n if randAssault == 5 or randAssault == 3:\n descriptionLabel.config(text=\"You've successfully parried!\")\n elif randAssault == 2:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 4:\n descriptionLabel.config(text=\"You've been hit!\")\n else:\n descriptionLabel.config(text=\"You've been grabbed!\")\n if predictedCounter == 'punch_R':\n instructionLabel.config(text='RIGHT PUNCH')\n if randAssault == 0 or randAssault == 1 or randAssault == 4:\n descriptionLabel.config(text=\n \"You've successfully counter attacked!\")\n elif randAssault == 2 or randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 5:\n descriptionLabel.config(text=\"You've been hit!\")\n if predictedCounter == 'punch_L':\n instructionLabel.config(text='LEFT PUNCH')\n if randAssault == 0 or randAssault == 1 or randAssault == 5:\n descriptionLabel.config(text=\n \"You've successfully counter attacked!\")\n elif randAssault == 2 or randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 4:\n descriptionLabel.config(text=\"You've been hit!\")\n if predictedCounter == 'weave_R':\n instructionLabel.config(text='RIGHT WEAVE')\n if randAssault == 1 or randAssault == 3 or randAssault == 5:\n descriptionLabel.config(text=\"You've successfully evaded!\")\n elif randAssault == 4:\n descriptionLabel.config(text=\"You've been hit!\")\n elif randAssault == 2:\n descriptionLabel.config(text=\"You've been cut!\")\n else:\n descriptionLabel.config(text=\"You've been grabbed!\")\n if predictedCounter == 'weave_L':\n instructionLabel.config(text='LEFT WEAVE')\n if randAssault == 0 or randAssault == 2 or randAssault == 4:\n descriptionLabel.config(text=\"You've successfully evaded!\")\n elif randAssault == 5:\n descriptionLabel.config(text=\"You've been hit!\")\n elif randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n else:\n descriptionLabel.config(text=\"You've been grabbed!\")\n if predictedCounter == 'block':\n instructionLabel.config(text='BLOCK')\n if randAssault == 5 or randAssault == 4:\n descriptionLabel.config(text=\"You've successfully blocked!\")\n elif randAssault == 2 or randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 0 or randAssault == 1:\n descriptionLabel.config(text=\"You've been grabbed!\")\n descriptionLabel.pack()\n\n\ncap = cv2.VideoCapture(0)\nroot = tk.Tk()\nroot.geometry('2000x1100')\nldFrame = Frame(root).pack(side='top')\ncanvas = Canvas(ldFrame, width=700, height=200)\ncanvas.pack(side='top')\npilLogo = Image.open('Logo.png')\nlogo = ImageTk.PhotoImage(pilLogo)\ncanvas.create_image(350, 100, image=logo)\ndescriptionText = (\n 'This program trains the user to respond in self 
defense to common physical threats.'\n )\ndescriptionLabel = tk.Label(ldFrame, justify='center', padx=10, font=(\n 'Courier', 18), wraplength=1900, text=descriptionText)\ndescriptionLabel.pack(side='top')\ncenterFrame = Frame(root).pack()\ncountdownLabel = tk.Label(centerFrame, justify='center', font=('Courier', \n 20), text='')\ninstructionText = (\n 'In this training system, you will be prompted with how an aggressor is approaching you. You may select a difficulty for this system by choosing how much time you would like to be allowed to react. Based on your counter attack, the system will tell you if the attacker has been [Narrowly Avoided], [Stunned], or [Subdued] based on the quality of your reaction. Your success rate will be tracked at the bottom of the screen. Press the [Start] button to begin and the [Stop] button to end the session.'\n )\ninstructionLabel = tk.Label(centerFrame, justify='center', padx=50, pady=50,\n font=('Courier', 16), wraplength=1800, text=instructionText)\ninstructionLabel.pack(side='top')\nvidLabel = Label(root)\n\n\ndef show_frame(milliseconds):\n if milliseconds > 0:\n _, frame = cap.read()\n frame = cv2.flip(frame, 1)\n cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)\n img = Image.fromarray(cv2image)\n imgtk = ImageTk.PhotoImage(img)\n vidLabel.imgtk = imgtk\n vidLabel.config(image=imgtk)\n root.update()\n root.after(30, show_frame, milliseconds - 30)\n _, frame = cap.read()\n cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)\n img = Image.fromarray(cv2image)\n img = img.convert('RGB')\n img.save('imgFile.jpeg')\n if milliseconds == secondsChosen * 3000:\n return ml.predict('imgFile.jpeg')\n\n\nbuttonFrame = Frame(root)\nbuttonFrame.pack(side='bottom')\ndifficultyList = Listbox(buttonFrame, selectmode=SINGLE, height=3, font=(\n 'Courier', 16))\ndifficultyList.insert(1, 'Easy: 6 seconds')\ndifficultyList.insert(2, 'Medium: 3 seconds')\ndifficultyList.insert(3, 'Hard: 1 seconds')\ndifficultyList.pack(side='top')\ncycling = True\n\n\ndef runPrompt():\n startButton.config(text='Next')\n startButton.pack(side=LEFT)\n resetButton.pack(side=RIGHT)\n descriptionLabel.pack_forget()\n assaultList = ['Grab from your right', 'Grab from your left',\n 'Blade attack from the right', 'Blade attack from the left',\n 'Hit from the right', 'Hit from the left']\n counterList = ['parry_R', 'parry_L', 'weave_R', 'weave_L', 'punch_R',\n 'punch_L', 'block']\n difficultyChoice = difficultyList.get(ACTIVE)\n global secondsChosen\n secondsChosen = 0\n if difficultyChoice[0] == 'E':\n secondsChosen = 6\n elif difficultyChoice[0] == 'M':\n secondsChosen = 3\n else:\n secondsChosen = 1\n print(secondsChosen)\n difficultyList.pack_forget()\n randAssault = random.randint(0, 5)\n instructionLabel.config(text=assaultList[randAssault], font=('Courier', 25)\n )\n vidLabel.pack()\n predictedCounter = show_frame(secondsChosen * 1000)\n if predictedCounter not in counterList:\n predictedCounter = counterList[random.randint(0, 6)]\n root.after(secondsChosen * 1200, calcSuccess, predictedCounter, randAssault\n )\n return 0\n\n\ndef reset():\n resetButton.pack_forget()\n startButton.config(text='Start')\n startButton.pack(side=BOTTOM)\n instructionLabel.config(text=instructionText, font=('Courier', 16))\n descriptionLabel.config(text=descriptionText, font=('Courier', 18))\n descriptionLabel.pack(side=TOP)\n difficultyList.pack(side=TOP)\n\n\nstartButton = Button(buttonFrame, bd=6, padx=20, pady=20, font=('Courier', \n 16), text='Start', fg='green', 
command=runPrompt)\nstartButton.pack(side=BOTTOM)\nresetButton = Button(buttonFrame, bd=6, padx=20, pady=20, font=('Courier', \n 16), text='Reset', fg='red', command=reset)\nroot.mainloop()\ncap.release()\n",
"step-5": "#THIS IS PYTHON3\nimport tkinter as tk\nfrom tkinter import *\nfrom PIL import ImageTk\nfrom PIL import Image #to handle non-gif image formats\n\nimport cv2\nimport numpy as np\nfrom statistics import mode\n\nimport time\n\nimport random\n\nimport predict as ml\n\ndef calcSuccess(predictedCounter, randAssault):\n vidLabel.pack_forget()\n if predictedCounter == \"parry_R\":\n instructionLabel.config(text=\"RIGHT PARRY\")\n if randAssault == 4 or randAssault == 2:\n descriptionLabel.config(text=\"You've successfully parried!\")\n elif randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 5:\n descriptionLabel.config(text=\"You've been hit!\")\n else:\n descriptionLabel.config(text=\"You've been grabbed!\")\n\n if predictedCounter == \"parry_L\":\n instructionLabel.config(text=\"LEFT PARRY\")\n if randAssault == 5 or randAssault == 3:\n descriptionLabel.config(text=\"You've successfully parried!\")\n elif randAssault == 2:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 4:\n descriptionLabel.config(text=\"You've been hit!\")\n else:\n descriptionLabel.config(text=\"You've been grabbed!\")\n\n if predictedCounter == \"punch_R\":\n instructionLabel.config(text=\"RIGHT PUNCH\")\n if randAssault == 0 or randAssault == 1 or randAssault == 4:\n descriptionLabel.config(text=\"You've successfully counter attacked!\")\n elif randAssault == 2 or randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 5:\n descriptionLabel.config(text=\"You've been hit!\")\n\n if predictedCounter == \"punch_L\":\n instructionLabel.config(text=\"LEFT PUNCH\")\n if randAssault == 0 or randAssault == 1 or randAssault == 5:\n descriptionLabel.config(text=\"You've successfully counter attacked!\")\n elif randAssault == 2 or randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 4:\n descriptionLabel.config(text=\"You've been hit!\")\n\n if predictedCounter == \"weave_R\":\n instructionLabel.config(text=\"RIGHT WEAVE\")\n if randAssault == 1 or randAssault == 3 or randAssault == 5:\n descriptionLabel.config(text=\"You've successfully evaded!\")\n elif randAssault == 4:\n descriptionLabel.config(text=\"You've been hit!\")\n elif randAssault == 2:\n descriptionLabel.config(text=\"You've been cut!\")\n else:\n descriptionLabel.config(text=\"You've been grabbed!\")\n\n if predictedCounter == \"weave_L\":\n instructionLabel.config(text=\"LEFT WEAVE\")\n if randAssault == 0 or randAssault == 2 or randAssault == 4:\n descriptionLabel.config(text=\"You've successfully evaded!\")\n elif randAssault == 5:\n descriptionLabel.config(text=\"You've been hit!\")\n elif randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n else:\n descriptionLabel.config(text=\"You've been grabbed!\")\n\n if predictedCounter == \"block\":\n instructionLabel.config(text=\"BLOCK\")\n if randAssault == 5 or randAssault == 4:\n descriptionLabel.config(text=\"You've successfully blocked!\")\n elif randAssault == 2 or randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 0 or randAssault == 1:\n descriptionLabel.config(text=\"You've been grabbed!\")\n descriptionLabel.pack()\n\n\ncap = cv2.VideoCapture(0)\n\nroot = tk.Tk() #initialize tkinter by making tk rook widget--consists of window with tile bar and decoration provided by window manager. 
Root widget must be made first and can only be one.\nroot.geometry(\"2000x1100\")\n\nldFrame = Frame(root).pack(side=\"top\") #frame to hold logo and description\ncanvas = Canvas(ldFrame, width=700, height=200)\ncanvas.pack(side=\"top\")\n\n#open image with pil image because PhotoImage only takes gif\npilLogo = Image.open(\"Logo.png\")\nlogo = ImageTk.PhotoImage(pilLogo) #makes PhotoImage from pil image\ncanvas.create_image(350, 100, image=logo) #adds PhotoImage to Canvas\n\n#make basic description label from text string on the logo description frame\ndescriptionText = \"\"\"This program trains the user to respond in self defense to common physical threats.\"\"\"\ndescriptionLabel = tk.Label(ldFrame, justify=\"center\", padx=10, font=(\"Courier\", 18), wraplength=1900, text=descriptionText)\ndescriptionLabel.pack(side=\"top\")\n\n#make center frame that will show instructions initially and then have \"assaulter\" prompts and live video\ncenterFrame = Frame(root).pack()\ncountdownLabel = tk.Label(centerFrame, justify=\"center\", font=(\"Courier\", 20), text=\"\") #invisible for now because not packed\ninstructionText = \"\"\"In this training system, you will be prompted with how an aggressor is approaching you. You may select a difficulty for this system by choosing how much time you would like to be allowed to react. Based on your counter attack, the system will tell you if the attacker has been [Narrowly Avoided], [Stunned], or [Subdued] based on the quality of your reaction. Your success rate will be tracked at the bottom of the screen. Press the [Start] button to begin and the [Stop] button to end the session.\"\"\"\ninstructionLabel = tk.Label(centerFrame, justify=\"center\", padx=50, pady=50, font=(\"Courier\", 16), wraplength=1800, text=instructionText)\ninstructionLabel.pack(side=\"top\")\n\n#setup to capture video frames\nvidLabel = Label(root)\ndef show_frame(milliseconds):\n if milliseconds > 0:\n #global predictionArr\n _, frame = cap.read()\n #predictionArr.append(predict.get_prediction(frame, \"ace-connection-236822\", \"ICN2459521650166688930\"))\n frame = cv2.flip(frame, 1) #horizontally flips images so is like reflection\n cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA) #makes normal color\n img = Image.fromarray(cv2image)\n imgtk = ImageTk.PhotoImage(img)\n vidLabel.imgtk = imgtk\n vidLabel.config(image=imgtk)\n root.update()\n root.after(30, show_frame, (milliseconds-30))\n _, frame = cap.read()\n cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA) \n img = Image.fromarray(cv2image)\n img = img.convert(\"RGB\")\n img.save(\"imgFile.jpeg\")\n if milliseconds == secondsChosen*3000: \n return ml.predict(\"imgFile.jpeg\")\n\n#make bottom frame that hold buttons\nbuttonFrame = Frame(root)\nbuttonFrame.pack(side=\"bottom\")\ndifficultyList = Listbox(buttonFrame, selectmode=SINGLE, height=3, font=(\"Courier\", 16))\ndifficultyList.insert(1, \"Easy: 6 seconds\")\ndifficultyList.insert(2, \"Medium: 3 seconds\")\ndifficultyList.insert(3, \"Hard: 1 seconds\")\ndifficultyList.pack(side=\"top\")\n\ncycling = True\n\ndef runPrompt():\n startButton.config(text=\"Next\")\n startButton.pack(side=LEFT)\n resetButton.pack(side=RIGHT)\n descriptionLabel.pack_forget()\n assaultList = [\"Grab from your right\", \"Grab from your left\", \"Blade attack from the right\", \"Blade attack from the left\", \"Hit from the right\", \"Hit from the left\"]\n counterList = [\"parry_R\", \"parry_L\", \"weave_R\", \"weave_L\", \"punch_R\", \"punch_L\", \"block\"]\n difficultyChoice = 
(difficultyList.get(ACTIVE))\n global secondsChosen\n secondsChosen = 0\n if difficultyChoice[0] == \"E\":\n secondsChosen = 6\n elif difficultyChoice[0] == \"M\":\n secondsChosen = 3\n else:\n secondsChosen = 1\n print(secondsChosen)\n difficultyList.pack_forget()\n\n randAssault = random.randint(0, 5)\n instructionLabel.config(text=assaultList[randAssault], font=(\"Courier\", 25))\n vidLabel.pack()\n \n predictedCounter = show_frame(secondsChosen*1000)\n \n if predictedCounter not in counterList:\n predictedCounter = counterList[random.randint(0, 6)]\n \n root.after(secondsChosen*1200, calcSuccess, predictedCounter, randAssault)\n\n return 0\n\ndef reset():\n resetButton.pack_forget()\n startButton.config(text=\"Start\")\n startButton.pack(side=BOTTOM)\n instructionLabel.config(text=instructionText, font=(\"Courier\", 16))\n descriptionLabel.config(text=descriptionText, font=(\"Courier\", 18))\n descriptionLabel.pack(side=TOP)\n difficultyList.pack(side=TOP)\n\n\nstartButton = Button(buttonFrame, bd=6, padx=20, pady=20,font=(\"Courier\", 16), text=\"Start\", fg=\"green\", command=runPrompt)\nstartButton.pack(side=BOTTOM)\nresetButton = Button(buttonFrame, bd=6, padx=20, pady=20, font=(\"Courier\", 16), text=\"Reset\", fg=\"red\", command=reset)\n \n\nroot.mainloop()\ncap.release()\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
class Folder(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Meta:
verbose_name_plural = 'Folders/Categories'
class Bookmark(models.Model):
name = models.CharField(max_length=200)
url = models.CharField(max_length=400)
folder = models.ForeignKey(Folder, on_delete=models.CASCADE)
date_of_creation = models.DateTimeField(default=datetime.now())
notes = models.TextField()
def __str__(self):
return self.name
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Folder(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __str__(self):
return self.folder
class Meta:
verbose_name_plural = 'Folders/Categories'
class Bookmark(models.Model):
name = models.CharField(max_length=200)
url = models.CharField(max_length=400)
folder = models.ForeignKey(Folder, on_delete=models.CASCADE)
date_of_creation = models.DateTimeField(default=datetime.now())
notes = models.TextField()
def __str__(self):
return self.name
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Folder(models.Model):
folder = models.CharField(max_length=200, default='misc')
num_of_entries = models.IntegerField(default=0)
def __str__(self):
return self.folder
class Meta:
verbose_name_plural = 'Folders/Categories'
class Bookmark(models.Model):
name = models.CharField(max_length=200)
url = models.CharField(max_length=400)
folder = models.ForeignKey(Folder, on_delete=models.CASCADE)
date_of_creation = models.DateTimeField(default=datetime.now())
notes = models.TextField()
def __str__(self):
return self.name
<|reserved_special_token_1|>
from django.db import models
from datetime import datetime
class Folder(models.Model):
folder = models.CharField(max_length=200, default='misc')
num_of_entries = models.IntegerField(default=0)
def __str__(self):
return self.folder
class Meta:
verbose_name_plural = 'Folders/Categories'
class Bookmark(models.Model):
name = models.CharField(max_length=200)
url = models.CharField(max_length=400)
folder = models.ForeignKey(Folder, on_delete=models.CASCADE)
    date_of_creation = models.DateTimeField(default=datetime.now)  # pass the callable itself so each row is stamped at creation time, not at import time
notes = models.TextField()
def __str__(self):
return self.name
<|reserved_special_token_1|>
from django.db import models
from datetime import datetime
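# note: default=datetime.now() below is evaluated once when this module is imported; passing datetime.now (no parentheses) or using auto_now_add=True stamps each row at creation time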
class Folder(models.Model):
folder = models.CharField(max_length=200, default = "misc")
num_of_entries = models.IntegerField(default=0)
def __str__(self):
return self.folder
class Meta:
verbose_name_plural = "Folders/Categories"
class Bookmark(models.Model):
name = models.CharField(max_length=200)
url = models.CharField(max_length=400)
folder = models.ForeignKey(Folder, on_delete=models.CASCADE)
date_of_creation = models.DateTimeField(default=datetime.now())
notes = models.TextField()
def __str__(self):
return self.name
|
flexible
|
{
"blob_id": "ca3cdbd5d5d30be4f40925366994c3ea9d9b9614",
"index": 3195,
"step-1": "<mask token>\n\n\nclass Folder(models.Model):\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n verbose_name_plural = 'Folders/Categories'\n\n\nclass Bookmark(models.Model):\n name = models.CharField(max_length=200)\n url = models.CharField(max_length=400)\n folder = models.ForeignKey(Folder, on_delete=models.CASCADE)\n date_of_creation = models.DateTimeField(default=datetime.now())\n notes = models.TextField()\n\n def __str__(self):\n return self.name\n",
"step-2": "<mask token>\n\n\nclass Folder(models.Model):\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.folder\n\n\n class Meta:\n verbose_name_plural = 'Folders/Categories'\n\n\nclass Bookmark(models.Model):\n name = models.CharField(max_length=200)\n url = models.CharField(max_length=400)\n folder = models.ForeignKey(Folder, on_delete=models.CASCADE)\n date_of_creation = models.DateTimeField(default=datetime.now())\n notes = models.TextField()\n\n def __str__(self):\n return self.name\n",
"step-3": "<mask token>\n\n\nclass Folder(models.Model):\n folder = models.CharField(max_length=200, default='misc')\n num_of_entries = models.IntegerField(default=0)\n\n def __str__(self):\n return self.folder\n\n\n class Meta:\n verbose_name_plural = 'Folders/Categories'\n\n\nclass Bookmark(models.Model):\n name = models.CharField(max_length=200)\n url = models.CharField(max_length=400)\n folder = models.ForeignKey(Folder, on_delete=models.CASCADE)\n date_of_creation = models.DateTimeField(default=datetime.now())\n notes = models.TextField()\n\n def __str__(self):\n return self.name\n",
"step-4": "from django.db import models\nfrom datetime import datetime\n\n\nclass Folder(models.Model):\n folder = models.CharField(max_length=200, default='misc')\n num_of_entries = models.IntegerField(default=0)\n\n def __str__(self):\n return self.folder\n\n\n class Meta:\n verbose_name_plural = 'Folders/Categories'\n\n\nclass Bookmark(models.Model):\n name = models.CharField(max_length=200)\n url = models.CharField(max_length=400)\n folder = models.ForeignKey(Folder, on_delete=models.CASCADE)\n date_of_creation = models.DateTimeField(default=datetime.now())\n notes = models.TextField()\n\n def __str__(self):\n return self.name\n",
"step-5": "from django.db import models\nfrom datetime import datetime\n\nclass Folder(models.Model):\n\tfolder = models.CharField(max_length=200, default = \"misc\")\n\tnum_of_entries = models.IntegerField(default=0)\n\n\tdef __str__(self):\n\t\treturn self.folder\n\n\tclass Meta:\n\t\tverbose_name_plural = \"Folders/Categories\"\n\nclass Bookmark(models.Model):\n\tname = models.CharField(max_length=200)\n\turl = models.CharField(max_length=400)\n\tfolder = models.ForeignKey(Folder, on_delete=models.CASCADE)\n\tdate_of_creation = models.DateTimeField(default=datetime.now())\n\tnotes = models.TextField()\n\n\tdef __str__(self):\n\t\treturn self.name\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
from app01 import models
from rest_framework.views import APIView
# from api.utils.response import BaseResponse
from rest_framework.response import Response
from rest_framework.pagination import PageNumberPagination
from api.serializers.course import DegreeCourseSerializer
# Query all degree courses
class DegreeCourseView(APIView):
def get(self,request,*args,**kwargs):
response = {'code':100,'data':None,'error':None}
try:
            # Fetch the data from the database
degreecourse_list = models.DegreeCourse.objects.all()
            # Pagination
# page = PageNumberPagination()
# course_list = page.paginate_queryset(queryset,request,self)
            # Serialize the results after pagination
ser_obj = DegreeCourseSerializer(degreecourse_list,many=True)
response['data'] = ser_obj.data
except Exception as e:
response['error'] = '获取数据失败'
return Response(response)
class DegreeCourseDetailView(APIView):
def get(self, request, pk, *args, **kwargs):
response = {'code': 100, 'data': None, 'error': None}
try:
degree_course = models.DegreeCourse.objects.filter(id=pk).first()
ser = DegreeCourseSerializer(degree_course)
response['data'] = ser.data
except Exception as e:
response['code'] = 500
response['error'] = '获取数据失败'
return Response(response)
|
normal
|
{
"blob_id": "2b3f8b1ac4735785683c00f6e6ced85d201de53f",
"index": 8567,
"step-1": "<mask token>\n\n\nclass DegreeCourseDetailView(APIView):\n\n def get(self, request, pk, *args, **kwargs):\n response = {'code': 100, 'data': None, 'error': None}\n try:\n degree_course = models.DegreeCourse.objects.filter(id=pk).first()\n ser = DegreeCourseSerializer(degree_course)\n response['data'] = ser.data\n except Exception as e:\n response['code'] = 500\n response['error'] = '获取数据失败'\n return Response(response)\n",
"step-2": "<mask token>\n\n\nclass DegreeCourseView(APIView):\n <mask token>\n\n\nclass DegreeCourseDetailView(APIView):\n\n def get(self, request, pk, *args, **kwargs):\n response = {'code': 100, 'data': None, 'error': None}\n try:\n degree_course = models.DegreeCourse.objects.filter(id=pk).first()\n ser = DegreeCourseSerializer(degree_course)\n response['data'] = ser.data\n except Exception as e:\n response['code'] = 500\n response['error'] = '获取数据失败'\n return Response(response)\n",
"step-3": "<mask token>\n\n\nclass DegreeCourseView(APIView):\n\n def get(self, request, *args, **kwargs):\n response = {'code': 100, 'data': None, 'error': None}\n try:\n degreecourse_list = models.DegreeCourse.objects.all()\n ser_obj = DegreeCourseSerializer(degreecourse_list, many=True)\n response['data'] = ser_obj.data\n except Exception as e:\n response['error'] = '获取数据失败'\n return Response(response)\n\n\nclass DegreeCourseDetailView(APIView):\n\n def get(self, request, pk, *args, **kwargs):\n response = {'code': 100, 'data': None, 'error': None}\n try:\n degree_course = models.DegreeCourse.objects.filter(id=pk).first()\n ser = DegreeCourseSerializer(degree_course)\n response['data'] = ser.data\n except Exception as e:\n response['code'] = 500\n response['error'] = '获取数据失败'\n return Response(response)\n",
"step-4": "from app01 import models\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework.pagination import PageNumberPagination\nfrom api.serializers.course import DegreeCourseSerializer\n\n\nclass DegreeCourseView(APIView):\n\n def get(self, request, *args, **kwargs):\n response = {'code': 100, 'data': None, 'error': None}\n try:\n degreecourse_list = models.DegreeCourse.objects.all()\n ser_obj = DegreeCourseSerializer(degreecourse_list, many=True)\n response['data'] = ser_obj.data\n except Exception as e:\n response['error'] = '获取数据失败'\n return Response(response)\n\n\nclass DegreeCourseDetailView(APIView):\n\n def get(self, request, pk, *args, **kwargs):\n response = {'code': 100, 'data': None, 'error': None}\n try:\n degree_course = models.DegreeCourse.objects.filter(id=pk).first()\n ser = DegreeCourseSerializer(degree_course)\n response['data'] = ser.data\n except Exception as e:\n response['code'] = 500\n response['error'] = '获取数据失败'\n return Response(response)\n",
"step-5": "from app01 import models\nfrom rest_framework.views import APIView\n# from api.utils.response import BaseResponse\nfrom rest_framework.response import Response\nfrom rest_framework.pagination import PageNumberPagination\nfrom api.serializers.course import DegreeCourseSerializer\n\n\n# 查询所有学位课程\n\nclass DegreeCourseView(APIView):\n\n def get(self,request,*args,**kwargs):\n response = {'code':100,'data':None,'error':None}\n\n try:\n # 从数据库获取数据\n degreecourse_list = models.DegreeCourse.objects.all()\n\n # 分页\n # page = PageNumberPagination()\n # course_list = page.paginate_queryset(queryset,request,self)\n\n # 分页之后的结果执行序列化\n ser_obj = DegreeCourseSerializer(degreecourse_list,many=True)\n\n response['data'] = ser_obj.data\n except Exception as e:\n\n response['error'] = '获取数据失败'\n\n return Response(response)\n\n\n\n\nclass DegreeCourseDetailView(APIView):\n\n def get(self, request, pk, *args, **kwargs):\n response = {'code': 100, 'data': None, 'error': None}\n try:\n degree_course = models.DegreeCourse.objects.filter(id=pk).first()\n\n ser = DegreeCourseSerializer(degree_course)\n response['data'] = ser.data\n except Exception as e:\n response['code'] = 500\n response['error'] = '获取数据失败'\n return Response(response)\n\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
conn.request('GET', '/teams/statistics?season=2016&team=768&league=4',
headers=headers)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
conn = http.client.HTTPSConnection('v3.football.api-sports.io')
headers = {'x-rapidapi-host': 'v3.football.api-sports.io', 'x-rapidapi-key': ''
}
conn.request('GET', '/teams/statistics?season=2016&team=768&league=4',
headers=headers)
res = conn.getresponse()
data = res.read()
pretty = json.loads(data)
<|reserved_special_token_1|>
import http.client
import json
conn = http.client.HTTPSConnection('v3.football.api-sports.io')
headers = {'x-rapidapi-host': 'v3.football.api-sports.io', 'x-rapidapi-key': ''
}
conn.request('GET', '/teams/statistics?season=2016&team=768&league=4',
headers=headers)
res = conn.getresponse()
data = res.read()
pretty = json.loads(data)
<|reserved_special_token_1|>
import http.client
import json
conn = http.client.HTTPSConnection("v3.football.api-sports.io")
headers = {
'x-rapidapi-host': "v3.football.api-sports.io",
'x-rapidapi-key': ""
}
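# NOTE: the 'x-rapidapi-key' header above is left blank; supply a valid API-Sports key before sending the request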
conn.request("GET", "/teams/statistics?season=2016&team=768&league=4", headers=headers)
res = conn.getresponse()
data = res.read()
pretty = json.loads(data)
|
flexible
|
{
"blob_id": "a6617934c5e6527cf59225a5d159d1ce8a33db50",
"index": 6681,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nconn.request('GET', '/teams/statistics?season=2016&team=768&league=4',\n headers=headers)\n<mask token>\n",
"step-3": "<mask token>\nconn = http.client.HTTPSConnection('v3.football.api-sports.io')\nheaders = {'x-rapidapi-host': 'v3.football.api-sports.io', 'x-rapidapi-key': ''\n }\nconn.request('GET', '/teams/statistics?season=2016&team=768&league=4',\n headers=headers)\nres = conn.getresponse()\ndata = res.read()\npretty = json.loads(data)\n",
"step-4": "import http.client\nimport json\nconn = http.client.HTTPSConnection('v3.football.api-sports.io')\nheaders = {'x-rapidapi-host': 'v3.football.api-sports.io', 'x-rapidapi-key': ''\n }\nconn.request('GET', '/teams/statistics?season=2016&team=768&league=4',\n headers=headers)\nres = conn.getresponse()\ndata = res.read()\npretty = json.loads(data)\n",
"step-5": "import http.client\nimport json\n\nconn = http.client.HTTPSConnection(\"v3.football.api-sports.io\")\n\nheaders = {\n 'x-rapidapi-host': \"v3.football.api-sports.io\",\n 'x-rapidapi-key': \"\"\n }\n\nconn.request(\"GET\", \"/teams/statistics?season=2016&team=768&league=4\", headers=headers)\n\nres = conn.getresponse()\ndata = res.read()\npretty = json.loads(data)\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
n, imp = list(map(int, input().split()))
villagers = {}
peoples = []
susList = set()
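# villagers[p] will hold everyone whose list includes p; susList starts with anyone whose own list includes themselves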
for i in range(n):
peeps = set(list(map(int, input().split()))[1:])
# Initialize the set
villagers[i+1] = villagers.get(i+1, set())
for p in peeps:
if i+1 in peeps:
susList.add(i+1)
break
villagers[p] = villagers.get(p, set()) | {i+1}
peoples.append(peeps)
# Confirmed imposters
queue = [s for s in susList]
while queue:
# Everyone that voted for them is an imposter
s = queue.pop()
queue.extend(list(villagers[s]))
susList |= set(villagers[s])
villagers[s] = set()
# Discredit all imposter votes
for s in susList:
for p in peoples[s-1]:
try:
villagers[p].remove(s)
except:
pass
for k, v in sorted(villagers.items(), key=lambda x: x[0]):
if imp - len(susList) >= (n- len(susList)) // 2:
print(0)
elif k in susList:
print(0)
elif len(v) >= imp - len(susList):
print(1)
else:
print(0)
|
normal
|
{
"blob_id": "3eca3066a6c6484257ca17164d35654812a87b80",
"index": 6636,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(n):\n peeps = set(list(map(int, input().split()))[1:])\n villagers[i + 1] = villagers.get(i + 1, set())\n for p in peeps:\n if i + 1 in peeps:\n susList.add(i + 1)\n break\n villagers[p] = villagers.get(p, set()) | {i + 1}\n peoples.append(peeps)\n<mask token>\nwhile queue:\n s = queue.pop()\n queue.extend(list(villagers[s]))\n susList |= set(villagers[s])\n villagers[s] = set()\nfor s in susList:\n for p in peoples[s - 1]:\n try:\n villagers[p].remove(s)\n except:\n pass\nfor k, v in sorted(villagers.items(), key=lambda x: x[0]):\n if imp - len(susList) >= (n - len(susList)) // 2:\n print(0)\n elif k in susList:\n print(0)\n elif len(v) >= imp - len(susList):\n print(1)\n else:\n print(0)\n",
"step-3": "n, imp = list(map(int, input().split()))\nvillagers = {}\npeoples = []\nsusList = set()\nfor i in range(n):\n peeps = set(list(map(int, input().split()))[1:])\n villagers[i + 1] = villagers.get(i + 1, set())\n for p in peeps:\n if i + 1 in peeps:\n susList.add(i + 1)\n break\n villagers[p] = villagers.get(p, set()) | {i + 1}\n peoples.append(peeps)\nqueue = [s for s in susList]\nwhile queue:\n s = queue.pop()\n queue.extend(list(villagers[s]))\n susList |= set(villagers[s])\n villagers[s] = set()\nfor s in susList:\n for p in peoples[s - 1]:\n try:\n villagers[p].remove(s)\n except:\n pass\nfor k, v in sorted(villagers.items(), key=lambda x: x[0]):\n if imp - len(susList) >= (n - len(susList)) // 2:\n print(0)\n elif k in susList:\n print(0)\n elif len(v) >= imp - len(susList):\n print(1)\n else:\n print(0)\n",
"step-4": "n, imp = list(map(int, input().split()))\nvillagers = {}\npeoples = []\nsusList = set()\nfor i in range(n):\n peeps = set(list(map(int, input().split()))[1:])\n # Initialize the set\n villagers[i+1] = villagers.get(i+1, set())\n for p in peeps:\n if i+1 in peeps:\n susList.add(i+1)\n break\n villagers[p] = villagers.get(p, set()) | {i+1}\n peoples.append(peeps)\n\n# Confirmed imposters\nqueue = [s for s in susList]\nwhile queue:\n # Everyone that voted for them is an imposter\n s = queue.pop()\n queue.extend(list(villagers[s]))\n susList |= set(villagers[s])\n villagers[s] = set()\n\n# Discredit all imposter votes\nfor s in susList:\n for p in peoples[s-1]:\n try:\n villagers[p].remove(s)\n except:\n pass\n\n\n\nfor k, v in sorted(villagers.items(), key=lambda x: x[0]):\n if imp - len(susList) >= (n- len(susList)) // 2:\n print(0)\n elif k in susList:\n print(0)\n elif len(v) >= imp - len(susList):\n print(1)\n else:\n print(0)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
"""This is the body of the low-level worker tool.
A worker is intended to run as a process that imports a module, mutates it in
one location with one operator, runs the tests, reports the results, and dies.
"""
import difflib
import importlib
import inspect
import json
import logging
import subprocess
import sys
import traceback
import astunparse
try:
import typing # the typing module does some fancy stuff at import time
# which we shall not do twice... by loading it here,
# preserve_modules does not delete it and therefore
# fancy stuff happens only once
except ImportError:
pass
from .config import serialize_config
from .importing import preserve_modules, using_ast
from .mutating import MutatingCore
from .parsing import get_ast
from .testing.test_runner import TestOutcome
from .work_item import WorkItem
log = logging.getLogger()
class WorkerOutcome:
"""Possible outcomes for a worker.
"""
NORMAL = 'normal'
EXCEPTION = 'exception'
NO_TEST = 'no-test'
TIMEOUT = 'timeout'
SKIPPED = 'skipped'
def worker(module_name,
operator_class,
occurrence,
test_runner):
"""Mutate the OCCURRENCE-th site for OPERATOR_CLASS in MODULE_NAME, run the
tests, and report the results.
This is fundamentally the single-mutation-and-test-run process
implementation.
There are three high-level ways that a worker can finish. First, it could
fail exceptionally, meaning that some uncaught exception made its way from
some part of the operation to terminate the function. This function will
intercept all exceptions and return it in a non-exceptional structure.
Second, the mutation testing machinery may determine that there is no
OCCURENCE-th instance for OPERATOR_NAME in the module under test. In this
case there is no way to report a test result (i.e. killed, survived, or
incompetent) so a special value is returned indicating that no mutation is
possible.
Finally, and hopefully normally, the worker will find that it can run a
test. It will do so and report back the result - killed, survived, or
incompetent - in a structured way.
Returns: a WorkItem
Raises: This will generally not raise any exceptions. Rather, exceptions
will be reported using the 'exception' result-type in the return value.
"""
try:
with preserve_modules():
module = importlib.import_module(module_name)
module_source_file = inspect.getsourcefile(module)
module_ast = get_ast(module)
module_source = astunparse.unparse(module_ast)
core = MutatingCore(occurrence)
operator = operator_class(core)
# note: after this step module_ast and modified_ast
# appear to be the same
modified_ast = operator.visit(module_ast)
modified_source = astunparse.unparse(modified_ast)
if not core.activation_record:
return WorkItem(
worker_outcome=WorkerOutcome.NO_TEST)
# generate a source diff to visualize how the mutation
# operator has changed the code
module_diff = ["--- mutation diff ---"]
for line in difflib.unified_diff(module_source.split('\n'),
modified_source.split('\n'),
fromfile="a" + module_source_file,
tofile="b" + module_source_file,
lineterm=""):
module_diff.append(line)
with using_ast(module_name, module_ast):
rec = test_runner()
rec.update({
'diff': module_diff,
'worker_outcome': WorkerOutcome.NORMAL
})
rec.update(core.activation_record)
return rec
except Exception: # noqa # pylint: disable=broad-except
return WorkItem(
data=traceback.format_exception(*sys.exc_info()),
test_outcome=TestOutcome.INCOMPETENT,
worker_outcome=WorkerOutcome.EXCEPTION)
def worker_process(work_item,
timeout,
config):
"""Run `cosmic-ray worker` in a subprocess and return the results,
passing `config` to it via stdin.
Returns: An updated WorkItem
"""
# The work_item param may come as just a dict (e.g. if it arrives over
# celery), so we reconstruct a WorkItem to make it easier to work with.
work_item = WorkItem(work_item)
command = 'cosmic-ray worker {module} {operator} {occurrence}'.format(
**work_item)
log.info('executing: %s', command)
proc = subprocess.Popen(command.split(),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
config_string = serialize_config(config)
try:
outs, _ = proc.communicate(input=config_string, timeout=timeout)
result = json.loads(outs)
work_item.update({
k: v
for k, v
in result.items()
if v is not None
})
except subprocess.TimeoutExpired as exc:
work_item.worker_outcome = WorkerOutcome.TIMEOUT
work_item.data = exc.timeout
proc.kill()
except json.JSONDecodeError as exc:
work_item.worker_outcome = WorkerOutcome.EXCEPTION
work_item.data = exc
work_item.command_line = command
return work_item
|
normal
|
{
"blob_id": "73a778c6e4216c23ac8d82eef96ce7b73b18f661",
"index": 9100,
"step-1": "<mask token>\n\n\nclass WorkerOutcome:\n \"\"\"Possible outcomes for a worker.\n \"\"\"\n NORMAL = 'normal'\n EXCEPTION = 'exception'\n NO_TEST = 'no-test'\n TIMEOUT = 'timeout'\n SKIPPED = 'skipped'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass WorkerOutcome:\n \"\"\"Possible outcomes for a worker.\n \"\"\"\n NORMAL = 'normal'\n EXCEPTION = 'exception'\n NO_TEST = 'no-test'\n TIMEOUT = 'timeout'\n SKIPPED = 'skipped'\n\n\ndef worker(module_name, operator_class, occurrence, test_runner):\n \"\"\"Mutate the OCCURRENCE-th site for OPERATOR_CLASS in MODULE_NAME, run the\n tests, and report the results.\n\n This is fundamentally the single-mutation-and-test-run process\n implementation.\n\n There are three high-level ways that a worker can finish. First, it could\n fail exceptionally, meaning that some uncaught exception made its way from\n some part of the operation to terminate the function. This function will\n intercept all exceptions and return it in a non-exceptional structure.\n\n Second, the mutation testing machinery may determine that there is no\n OCCURENCE-th instance for OPERATOR_NAME in the module under test. In this\n case there is no way to report a test result (i.e. killed, survived, or\n incompetent) so a special value is returned indicating that no mutation is\n possible.\n\n Finally, and hopefully normally, the worker will find that it can run a\n test. It will do so and report back the result - killed, survived, or\n incompetent - in a structured way.\n\n Returns: a WorkItem\n\n Raises: This will generally not raise any exceptions. Rather, exceptions\n will be reported using the 'exception' result-type in the return value.\n\n \"\"\"\n try:\n with preserve_modules():\n module = importlib.import_module(module_name)\n module_source_file = inspect.getsourcefile(module)\n module_ast = get_ast(module)\n module_source = astunparse.unparse(module_ast)\n core = MutatingCore(occurrence)\n operator = operator_class(core)\n modified_ast = operator.visit(module_ast)\n modified_source = astunparse.unparse(modified_ast)\n if not core.activation_record:\n return WorkItem(worker_outcome=WorkerOutcome.NO_TEST)\n module_diff = ['--- mutation diff ---']\n for line in difflib.unified_diff(module_source.split('\\n'),\n modified_source.split('\\n'), fromfile='a' +\n module_source_file, tofile='b' + module_source_file,\n lineterm=''):\n module_diff.append(line)\n with using_ast(module_name, module_ast):\n rec = test_runner()\n rec.update({'diff': module_diff, 'worker_outcome': WorkerOutcome.\n NORMAL})\n rec.update(core.activation_record)\n return rec\n except Exception:\n return WorkItem(data=traceback.format_exception(*sys.exc_info()),\n test_outcome=TestOutcome.INCOMPETENT, worker_outcome=\n WorkerOutcome.EXCEPTION)\n\n\ndef worker_process(work_item, timeout, config):\n \"\"\"Run `cosmic-ray worker` in a subprocess and return the results,\n passing `config` to it via stdin.\n\n Returns: An updated WorkItem\n\n \"\"\"\n work_item = WorkItem(work_item)\n command = 'cosmic-ray worker {module} {operator} {occurrence}'.format(**\n work_item)\n log.info('executing: %s', command)\n proc = subprocess.Popen(command.split(), stdin=subprocess.PIPE, stdout=\n subprocess.PIPE, universal_newlines=True)\n config_string = serialize_config(config)\n try:\n outs, _ = proc.communicate(input=config_string, timeout=timeout)\n result = json.loads(outs)\n work_item.update({k: v for k, v in result.items() if v is not None})\n except subprocess.TimeoutExpired as exc:\n work_item.worker_outcome = WorkerOutcome.TIMEOUT\n work_item.data = exc.timeout\n proc.kill()\n except json.JSONDecodeError as exc:\n work_item.worker_outcome = WorkerOutcome.EXCEPTION\n work_item.data = exc\n work_item.command_line = command\n return 
work_item\n",
"step-3": "<mask token>\ntry:\n import typing\nexcept ImportError:\n pass\n<mask token>\n\n\nclass WorkerOutcome:\n \"\"\"Possible outcomes for a worker.\n \"\"\"\n NORMAL = 'normal'\n EXCEPTION = 'exception'\n NO_TEST = 'no-test'\n TIMEOUT = 'timeout'\n SKIPPED = 'skipped'\n\n\ndef worker(module_name, operator_class, occurrence, test_runner):\n \"\"\"Mutate the OCCURRENCE-th site for OPERATOR_CLASS in MODULE_NAME, run the\n tests, and report the results.\n\n This is fundamentally the single-mutation-and-test-run process\n implementation.\n\n There are three high-level ways that a worker can finish. First, it could\n fail exceptionally, meaning that some uncaught exception made its way from\n some part of the operation to terminate the function. This function will\n intercept all exceptions and return it in a non-exceptional structure.\n\n Second, the mutation testing machinery may determine that there is no\n OCCURENCE-th instance for OPERATOR_NAME in the module under test. In this\n case there is no way to report a test result (i.e. killed, survived, or\n incompetent) so a special value is returned indicating that no mutation is\n possible.\n\n Finally, and hopefully normally, the worker will find that it can run a\n test. It will do so and report back the result - killed, survived, or\n incompetent - in a structured way.\n\n Returns: a WorkItem\n\n Raises: This will generally not raise any exceptions. Rather, exceptions\n will be reported using the 'exception' result-type in the return value.\n\n \"\"\"\n try:\n with preserve_modules():\n module = importlib.import_module(module_name)\n module_source_file = inspect.getsourcefile(module)\n module_ast = get_ast(module)\n module_source = astunparse.unparse(module_ast)\n core = MutatingCore(occurrence)\n operator = operator_class(core)\n modified_ast = operator.visit(module_ast)\n modified_source = astunparse.unparse(modified_ast)\n if not core.activation_record:\n return WorkItem(worker_outcome=WorkerOutcome.NO_TEST)\n module_diff = ['--- mutation diff ---']\n for line in difflib.unified_diff(module_source.split('\\n'),\n modified_source.split('\\n'), fromfile='a' +\n module_source_file, tofile='b' + module_source_file,\n lineterm=''):\n module_diff.append(line)\n with using_ast(module_name, module_ast):\n rec = test_runner()\n rec.update({'diff': module_diff, 'worker_outcome': WorkerOutcome.\n NORMAL})\n rec.update(core.activation_record)\n return rec\n except Exception:\n return WorkItem(data=traceback.format_exception(*sys.exc_info()),\n test_outcome=TestOutcome.INCOMPETENT, worker_outcome=\n WorkerOutcome.EXCEPTION)\n\n\ndef worker_process(work_item, timeout, config):\n \"\"\"Run `cosmic-ray worker` in a subprocess and return the results,\n passing `config` to it via stdin.\n\n Returns: An updated WorkItem\n\n \"\"\"\n work_item = WorkItem(work_item)\n command = 'cosmic-ray worker {module} {operator} {occurrence}'.format(**\n work_item)\n log.info('executing: %s', command)\n proc = subprocess.Popen(command.split(), stdin=subprocess.PIPE, stdout=\n subprocess.PIPE, universal_newlines=True)\n config_string = serialize_config(config)\n try:\n outs, _ = proc.communicate(input=config_string, timeout=timeout)\n result = json.loads(outs)\n work_item.update({k: v for k, v in result.items() if v is not None})\n except subprocess.TimeoutExpired as exc:\n work_item.worker_outcome = WorkerOutcome.TIMEOUT\n work_item.data = exc.timeout\n proc.kill()\n except json.JSONDecodeError as exc:\n work_item.worker_outcome = WorkerOutcome.EXCEPTION\n 
work_item.data = exc\n work_item.command_line = command\n return work_item\n",
"step-4": "<mask token>\nimport difflib\nimport importlib\nimport inspect\nimport json\nimport logging\nimport subprocess\nimport sys\nimport traceback\nimport astunparse\ntry:\n import typing\nexcept ImportError:\n pass\nfrom .config import serialize_config\nfrom .importing import preserve_modules, using_ast\nfrom .mutating import MutatingCore\nfrom .parsing import get_ast\nfrom .testing.test_runner import TestOutcome\nfrom .work_item import WorkItem\nlog = logging.getLogger()\n\n\nclass WorkerOutcome:\n \"\"\"Possible outcomes for a worker.\n \"\"\"\n NORMAL = 'normal'\n EXCEPTION = 'exception'\n NO_TEST = 'no-test'\n TIMEOUT = 'timeout'\n SKIPPED = 'skipped'\n\n\ndef worker(module_name, operator_class, occurrence, test_runner):\n \"\"\"Mutate the OCCURRENCE-th site for OPERATOR_CLASS in MODULE_NAME, run the\n tests, and report the results.\n\n This is fundamentally the single-mutation-and-test-run process\n implementation.\n\n There are three high-level ways that a worker can finish. First, it could\n fail exceptionally, meaning that some uncaught exception made its way from\n some part of the operation to terminate the function. This function will\n intercept all exceptions and return it in a non-exceptional structure.\n\n Second, the mutation testing machinery may determine that there is no\n OCCURENCE-th instance for OPERATOR_NAME in the module under test. In this\n case there is no way to report a test result (i.e. killed, survived, or\n incompetent) so a special value is returned indicating that no mutation is\n possible.\n\n Finally, and hopefully normally, the worker will find that it can run a\n test. It will do so and report back the result - killed, survived, or\n incompetent - in a structured way.\n\n Returns: a WorkItem\n\n Raises: This will generally not raise any exceptions. 
Rather, exceptions\n will be reported using the 'exception' result-type in the return value.\n\n \"\"\"\n try:\n with preserve_modules():\n module = importlib.import_module(module_name)\n module_source_file = inspect.getsourcefile(module)\n module_ast = get_ast(module)\n module_source = astunparse.unparse(module_ast)\n core = MutatingCore(occurrence)\n operator = operator_class(core)\n modified_ast = operator.visit(module_ast)\n modified_source = astunparse.unparse(modified_ast)\n if not core.activation_record:\n return WorkItem(worker_outcome=WorkerOutcome.NO_TEST)\n module_diff = ['--- mutation diff ---']\n for line in difflib.unified_diff(module_source.split('\\n'),\n modified_source.split('\\n'), fromfile='a' +\n module_source_file, tofile='b' + module_source_file,\n lineterm=''):\n module_diff.append(line)\n with using_ast(module_name, module_ast):\n rec = test_runner()\n rec.update({'diff': module_diff, 'worker_outcome': WorkerOutcome.\n NORMAL})\n rec.update(core.activation_record)\n return rec\n except Exception:\n return WorkItem(data=traceback.format_exception(*sys.exc_info()),\n test_outcome=TestOutcome.INCOMPETENT, worker_outcome=\n WorkerOutcome.EXCEPTION)\n\n\ndef worker_process(work_item, timeout, config):\n \"\"\"Run `cosmic-ray worker` in a subprocess and return the results,\n passing `config` to it via stdin.\n\n Returns: An updated WorkItem\n\n \"\"\"\n work_item = WorkItem(work_item)\n command = 'cosmic-ray worker {module} {operator} {occurrence}'.format(**\n work_item)\n log.info('executing: %s', command)\n proc = subprocess.Popen(command.split(), stdin=subprocess.PIPE, stdout=\n subprocess.PIPE, universal_newlines=True)\n config_string = serialize_config(config)\n try:\n outs, _ = proc.communicate(input=config_string, timeout=timeout)\n result = json.loads(outs)\n work_item.update({k: v for k, v in result.items() if v is not None})\n except subprocess.TimeoutExpired as exc:\n work_item.worker_outcome = WorkerOutcome.TIMEOUT\n work_item.data = exc.timeout\n proc.kill()\n except json.JSONDecodeError as exc:\n work_item.worker_outcome = WorkerOutcome.EXCEPTION\n work_item.data = exc\n work_item.command_line = command\n return work_item\n",
"step-5": "\"\"\"This is the body of the low-level worker tool.\n\nA worker is intended to run as a process that imports a module, mutates it in\none location with one operator, runs the tests, reports the results, and dies.\n\"\"\"\n\nimport difflib\nimport importlib\nimport inspect\nimport json\nimport logging\nimport subprocess\nimport sys\nimport traceback\n\nimport astunparse\ntry:\n import typing # the typing module does some fancy stuff at import time\n # which we shall not do twice... by loading it here,\n # preserve_modules does not delete it and therefore\n # fancy stuff happens only once\nexcept ImportError:\n pass\n\nfrom .config import serialize_config\nfrom .importing import preserve_modules, using_ast\nfrom .mutating import MutatingCore\nfrom .parsing import get_ast\nfrom .testing.test_runner import TestOutcome\nfrom .work_item import WorkItem\n\nlog = logging.getLogger()\n\n\nclass WorkerOutcome:\n \"\"\"Possible outcomes for a worker.\n \"\"\"\n NORMAL = 'normal'\n EXCEPTION = 'exception'\n NO_TEST = 'no-test'\n TIMEOUT = 'timeout'\n SKIPPED = 'skipped'\n\n\ndef worker(module_name,\n operator_class,\n occurrence,\n test_runner):\n \"\"\"Mutate the OCCURRENCE-th site for OPERATOR_CLASS in MODULE_NAME, run the\n tests, and report the results.\n\n This is fundamentally the single-mutation-and-test-run process\n implementation.\n\n There are three high-level ways that a worker can finish. First, it could\n fail exceptionally, meaning that some uncaught exception made its way from\n some part of the operation to terminate the function. This function will\n intercept all exceptions and return it in a non-exceptional structure.\n\n Second, the mutation testing machinery may determine that there is no\n OCCURENCE-th instance for OPERATOR_NAME in the module under test. In this\n case there is no way to report a test result (i.e. killed, survived, or\n incompetent) so a special value is returned indicating that no mutation is\n possible.\n\n Finally, and hopefully normally, the worker will find that it can run a\n test. It will do so and report back the result - killed, survived, or\n incompetent - in a structured way.\n\n Returns: a WorkItem\n\n Raises: This will generally not raise any exceptions. 
Rather, exceptions\n will be reported using the 'exception' result-type in the return value.\n\n \"\"\"\n try:\n with preserve_modules():\n module = importlib.import_module(module_name)\n module_source_file = inspect.getsourcefile(module)\n module_ast = get_ast(module)\n module_source = astunparse.unparse(module_ast)\n\n core = MutatingCore(occurrence)\n operator = operator_class(core)\n # note: after this step module_ast and modified_ast\n # appear to be the same\n modified_ast = operator.visit(module_ast)\n modified_source = astunparse.unparse(modified_ast)\n\n if not core.activation_record:\n return WorkItem(\n worker_outcome=WorkerOutcome.NO_TEST)\n\n # generate a source diff to visualize how the mutation\n # operator has changed the code\n module_diff = [\"--- mutation diff ---\"]\n for line in difflib.unified_diff(module_source.split('\\n'),\n modified_source.split('\\n'),\n fromfile=\"a\" + module_source_file,\n tofile=\"b\" + module_source_file,\n lineterm=\"\"):\n module_diff.append(line)\n\n with using_ast(module_name, module_ast):\n rec = test_runner()\n\n rec.update({\n 'diff': module_diff,\n 'worker_outcome': WorkerOutcome.NORMAL\n })\n rec.update(core.activation_record)\n return rec\n\n except Exception: # noqa # pylint: disable=broad-except\n return WorkItem(\n data=traceback.format_exception(*sys.exc_info()),\n test_outcome=TestOutcome.INCOMPETENT,\n worker_outcome=WorkerOutcome.EXCEPTION)\n\n\ndef worker_process(work_item,\n timeout,\n config):\n \"\"\"Run `cosmic-ray worker` in a subprocess and return the results,\n passing `config` to it via stdin.\n\n Returns: An updated WorkItem\n\n \"\"\"\n # The work_item param may come as just a dict (e.g. if it arrives over\n # celery), so we reconstruct a WorkItem to make it easier to work with.\n work_item = WorkItem(work_item)\n\n command = 'cosmic-ray worker {module} {operator} {occurrence}'.format(\n **work_item)\n\n log.info('executing: %s', command)\n\n proc = subprocess.Popen(command.split(),\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n universal_newlines=True)\n config_string = serialize_config(config)\n try:\n outs, _ = proc.communicate(input=config_string, timeout=timeout)\n result = json.loads(outs)\n work_item.update({\n k: v\n for k, v\n in result.items()\n if v is not None\n })\n except subprocess.TimeoutExpired as exc:\n work_item.worker_outcome = WorkerOutcome.TIMEOUT\n work_item.data = exc.timeout\n proc.kill()\n except json.JSONDecodeError as exc:\n work_item.worker_outcome = WorkerOutcome.EXCEPTION\n work_item.data = exc\n\n work_item.command_line = command\n return work_item\n",
"step-ids": [
3,
5,
6,
8,
9
]
}
|
[
3,
5,
6,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(a):
d = b * (a - i) + c * (a - (a - i)) + c * (a - (a - i)) + b * (a - i)
print(d)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
a = int(input('numero: '))
b = '*'
c = 'o'
for i in range(a):
d = b * (a - i) + c * (a - (a - i)) + c * (a - (a - i)) + b * (a - i)
print(d)
<|reserved_special_token_1|>
from numpy import *
a = int(input('numero: '))
b = '*'
c = 'o'
for i in range(a):
d = b * (a - i) + c * (a - (a - i)) + c * (a - (a - i)) + b * (a - i)
print(d)
<|reserved_special_token_1|>
from numpy import*
a=int(input('numero: '))
b='*'
c='o'
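# row i: (a - i) '*' on each side around 2*i 'o' in the middle, so the band of 'o's widens on each row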
for i in range(a):
d=(b*(a-i))+(c*(a-(a-i)))+(c*(a-(a-i)))+(b*(a-i))
print(d)
|
flexible
|
{
"blob_id": "155b243ad7d93bcf2b74cd5b2bd3409ab7ec7473",
"index": 8488,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(a):\n d = b * (a - i) + c * (a - (a - i)) + c * (a - (a - i)) + b * (a - i)\n print(d)\n",
"step-3": "<mask token>\na = int(input('numero: '))\nb = '*'\nc = 'o'\nfor i in range(a):\n d = b * (a - i) + c * (a - (a - i)) + c * (a - (a - i)) + b * (a - i)\n print(d)\n",
"step-4": "from numpy import *\na = int(input('numero: '))\nb = '*'\nc = 'o'\nfor i in range(a):\n d = b * (a - i) + c * (a - (a - i)) + c * (a - (a - i)) + b * (a - i)\n print(d)\n",
"step-5": "from numpy import*\n\na=int(input('numero: '))\nb='*'\nc='o'\nfor i in range(a):\n\td=(b*(a-i))+(c*(a-(a-i)))+(c*(a-(a-i)))+(b*(a-i))\n\tprint(d)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
A, B = map(int, input().split())
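# (B^2 - A^2) / (2A - 2B) simplifies to -(A + B) / 2, i.e. |K| is the midpoint of A and B for non-negative inputs; it is an integer only when A + B is even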
K = (B ** 2 - A ** 2) / (2 * A - 2 * B)
print(int(abs(K))) if K.is_integer() else print('IMPOSSIBLE')
|
normal
|
{
"blob_id": "36a7d3ed28348e56e54ce4bfa937363a64ee718f",
"index": 6981,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(int(abs(K))) if K.is_integer() else print('IMPOSSIBLE')\n",
"step-3": "A, B = map(int, input().split())\nK = (B ** 2 - A ** 2) / (2 * A - 2 * B)\nprint(int(abs(K))) if K.is_integer() else print('IMPOSSIBLE')\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
def deps_remote():
for step in INSTALL_STEPS:
run(step)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def deps_local():
for step in INSTALL_STEPS:
local(step)
def deps_remote():
for step in INSTALL_STEPS:
run(step)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
INSTALL_STEPS = [
'yes | sudo apt-get install libmysqlclient-dev\t python-dev python-mysqldb python-virtualenv'
, 'virtualenv --no-site-packages env',
'. env/bin/activate;pip install -r requirements.txt']
def deps_local():
for step in INSTALL_STEPS:
local(step)
def deps_remote():
for step in INSTALL_STEPS:
run(step)
<|reserved_special_token_1|>
from fabric.api import local, run
INSTALL_STEPS = [
'yes | sudo apt-get install libmysqlclient-dev\t python-dev python-mysqldb python-virtualenv'
, 'virtualenv --no-site-packages env',
'. env/bin/activate;pip install -r requirements.txt']
def deps_local():
for step in INSTALL_STEPS:
local(step)
def deps_remote():
for step in INSTALL_STEPS:
run(step)
<|reserved_special_token_1|>
from fabric.api import local,run
INSTALL_STEPS = ['yes | sudo apt-get install libmysqlclient-dev python-dev python-mysqldb python-virtualenv',
'virtualenv --no-site-packages env',
'. env/bin/activate;pip install -r requirements.txt']
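# deps_local executes the steps on the local machine; deps_remote runs the same steps on the remote host via fabric's run()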
def deps_local():
    for step in INSTALL_STEPS:
        local(step)
def deps_remote():
    for step in INSTALL_STEPS:
        run(step)
|
flexible
|
{
"blob_id": "d64140466e62b78506d0f200f451649023697a3b",
"index": 1386,
"step-1": "<mask token>\n\n\ndef deps_remote():\n for step in INSTALL_STEPS:\n run(step)\n",
"step-2": "<mask token>\n\n\ndef deps_local():\n for step in INSTALL_STEPS:\n local(step)\n\n\ndef deps_remote():\n for step in INSTALL_STEPS:\n run(step)\n",
"step-3": "<mask token>\nINSTALL_STEPS = [\n 'yes | sudo apt-get install libmysqlclient-dev\\t python-dev python-mysqldb python-virtualenv'\n , 'virtualenv --no-site-packages env',\n '. env/bin/activate;pip install -r requirements.txt']\n\n\ndef deps_local():\n for step in INSTALL_STEPS:\n local(step)\n\n\ndef deps_remote():\n for step in INSTALL_STEPS:\n run(step)\n",
"step-4": "from fabric.api import local, run\nINSTALL_STEPS = [\n 'yes | sudo apt-get install libmysqlclient-dev\\t python-dev python-mysqldb python-virtualenv'\n , 'virtualenv --no-site-packages env',\n '. env/bin/activate;pip install -r requirements.txt']\n\n\ndef deps_local():\n for step in INSTALL_STEPS:\n local(step)\n\n\ndef deps_remote():\n for step in INSTALL_STEPS:\n run(step)\n",
"step-5": "from fabric.api import local,run\nINSTALL_STEPS = ['yes | sudo apt-get install libmysqlclient-dev\t python-dev python-mysqldb python-virtualenv',\n 'virtualenv --no-site-packages env',\n '. env/bin/activate;pip install -r requirements.txt']\ndef deps_local():\n for step in INSTALL_STEPS:\n \tlocal(step)\ndef deps_remote():\n for step in INSTALL_STEPS:\n \trun(step)\t\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(N):
x, y = map(int, input().split())
if x * x + y * y <= D2:
ans += 1
print(ans)
<|reserved_special_token_1|>
N, D = map(int, input().split())
ans = 0
D2 = D * D
for i in range(N):
x, y = map(int, input().split())
if x * x + y * y <= D2:
ans += 1
print(ans)
<|reserved_special_token_1|>
N, D = map(int, input().split())
ans = 0
D2 = D*D
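# count points (x, y) whose squared distance from the origin is at most D*D (avoids a floating-point sqrt)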
for i in range(N):
x, y = map(int, input().split())
if (x*x+y*y) <= D2:
ans += 1
print(ans)
|
flexible
|
{
"blob_id": "947055d1d6acc50e1722d79ea30e327414cd9c41",
"index": 8523,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(N):\n x, y = map(int, input().split())\n if x * x + y * y <= D2:\n ans += 1\nprint(ans)\n",
"step-3": "N, D = map(int, input().split())\nans = 0\nD2 = D * D\nfor i in range(N):\n x, y = map(int, input().split())\n if x * x + y * y <= D2:\n ans += 1\nprint(ans)\n",
"step-4": "N, D = map(int, input().split())\nans = 0\nD2 = D*D\nfor i in range(N):\n x, y = map(int, input().split())\n if (x*x+y*y) <= D2:\n ans += 1\n\nprint(ans)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def calc_accumulated_indicende_per_ccaa(report, num_days=15):
ccaas = data_sources.get_ccaas_in_dset(report)
dframe = report['dframe']
num_cases = dframe['num_casos']
ccaa_column = data_sources.get_ccaa_column_in_index(num_cases.index)
index = num_cases.index.to_frame(index=False)
time_delta = numpy.timedelta64(num_days, 'D')
accumulated_cases_by_ccaa = {}
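    # for every CCAA, sum the cases reported in the trailing num_days window and scale to cases per 100,000 inhabitants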
for ccaa in ccaas:
mask = index[ccaa_column] == ccaa
mask = mask.values
num_cases_for_this_ccaa = num_cases[mask]
this_ccaa_index = num_cases_for_this_ccaa.index.to_frame(index=False)
this_ccaa_dates = this_ccaa_index['fecha']
num_accumulated_cases = []
valid_dates = []
for date in this_ccaa_dates:
date0 = date - time_delta
mask = numpy.logical_and(this_ccaa_dates > date0,
this_ccaa_dates <= date)
mask = mask.values
if numpy.sum(mask) < num_days:
continue
num_accumulated_cases.append(numpy.sum(num_cases_for_this_ccaa[
mask]))
valid_dates.append(date)
num_accumulated_cases = pandas.Series(num_accumulated_cases, index=
valid_dates)
num_accumulated_cases = (num_accumulated_cases / data_sources.
POPULATION[ccaa] * 100000.0)
accumulated_cases_by_ccaa[ccaa] = num_accumulated_cases
return accumulated_cases_by_ccaa
<|reserved_special_token_0|>
def is_desired_ccaa(ccaa, desired_ccaas):
return desired_ccaas is None or data_sources.convert_to_ccaa_iso(ccaa
) in desired_ccaas
<|reserved_special_token_0|>
def _create_table_for_chart_from_dframe(dframe, desired_ccaas):
ccaas = sorted(dframe.index)
ccaas = [ccaa for ccaa in ccaas if is_desired_ccaa(ccaa, desired_ccaas)]
dates = list(dframe.columns)
table = []
for date in dates:
row = [date.date()]
for ccaa in ccaas:
row.append(dframe.loc[ccaa, date])
table.append(row)
return table, ccaas, dates
def _create_table_for_chart_from_series(series):
table = [(date.date(), value) for date, value in zip(series.index,
series.values)]
return table
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def calc_accumulated_indicende_per_ccaa(report, num_days=15):
ccaas = data_sources.get_ccaas_in_dset(report)
dframe = report['dframe']
num_cases = dframe['num_casos']
ccaa_column = data_sources.get_ccaa_column_in_index(num_cases.index)
index = num_cases.index.to_frame(index=False)
time_delta = numpy.timedelta64(num_days, 'D')
accumulated_cases_by_ccaa = {}
for ccaa in ccaas:
mask = index[ccaa_column] == ccaa
mask = mask.values
num_cases_for_this_ccaa = num_cases[mask]
this_ccaa_index = num_cases_for_this_ccaa.index.to_frame(index=False)
this_ccaa_dates = this_ccaa_index['fecha']
num_accumulated_cases = []
valid_dates = []
for date in this_ccaa_dates:
date0 = date - time_delta
mask = numpy.logical_and(this_ccaa_dates > date0,
this_ccaa_dates <= date)
mask = mask.values
if numpy.sum(mask) < num_days:
continue
num_accumulated_cases.append(numpy.sum(num_cases_for_this_ccaa[
mask]))
valid_dates.append(date)
num_accumulated_cases = pandas.Series(num_accumulated_cases, index=
valid_dates)
num_accumulated_cases = (num_accumulated_cases / data_sources.
POPULATION[ccaa] * 100000.0)
accumulated_cases_by_ccaa[ccaa] = num_accumulated_cases
return accumulated_cases_by_ccaa
def _create_js_chart(dframe, date_range, js_function_name, div_id, title,
width, height):
table = []
ccaas = sorted(dframe.index)
dates = list(dframe.columns)
if date_range is not None:
dates = [date for date in dates if date > date_range[0] and date <=
date_range[1]]
columns = [('date', 'fecha')]
columns.extend([('number', data_sources.convert_to_ccaa_name(ccaa)) for
ccaa in ccaas])
for date in dates:
row = [date.date()]
for ccaa in ccaas:
value = dframe.loc[ccaa, date]
row.append(value)
table.append(row)
js_function_name = js_function_name
html = material_line_chart.create_chart_js(js_function_name, div_id,
title, columns, table, width=width, height=height)
return html
def _write_table_from_series(series):
html = '<table>'
for index, value in zip(series.index, series.values):
html += f'<tr><td>{index}</td><td>{value}</td></tr>\n'
html += '</table>'
return html
def is_desired_ccaa(ccaa, desired_ccaas):
return desired_ccaas is None or data_sources.convert_to_ccaa_iso(ccaa
) in desired_ccaas
def _create_table_for_chart_from_dict(dict_data, desired_ccaas):
one_data = list(dict_data.values())[0]
ccaas = sorted(dict_data.keys())
ccaas = [ccaa for ccaa in ccaas if is_desired_ccaa(ccaa, desired_ccaas)]
dates = list(one_data.index)
table = []
for date in dates:
row = [date.date()]
for ccaa in ccaas:
row.append(dict_data[ccaa][date])
table.append(row)
return table, ccaas, dates
def _create_accumulate_indicence_table_for_spa_chart_from_report(report,
num_days):
dframe = report['dframe']
time_delta = numpy.timedelta64(num_days, 'D')
num_cases = dframe.groupby(level=1).sum().loc[:, 'num_casos']
tot_pop = sum(data_sources.POPULATION.values())
dates = numpy.array(num_cases.index)
num_accumulated_cases = []
valid_dates = []
for date in dates:
date0 = date - time_delta
mask = numpy.logical_and(dates > date0, dates <= date)
if numpy.sum(mask) < num_days:
continue
num_accumulated_cases.append(numpy.sum(num_cases[mask]) / tot_pop *
100000.0)
date = datetime.datetime.fromtimestamp(date.astype('O') / 1000000000.0)
valid_dates.append(date)
table = [(date.date(), cases) for date, cases in zip(valid_dates,
num_accumulated_cases)]
dates = valid_dates
return table, dates
def _create_table_for_chart_from_dframe(dframe, desired_ccaas):
ccaas = sorted(dframe.index)
ccaas = [ccaa for ccaa in ccaas if is_desired_ccaa(ccaa, desired_ccaas)]
dates = list(dframe.columns)
table = []
for date in dates:
row = [date.date()]
for ccaa in ccaas:
row.append(dframe.loc[ccaa, date])
table.append(row)
return table, ccaas, dates
def _create_table_for_chart_from_series(series):
table = [(date.date(), value) for date, value in zip(series.index,
series.values)]
return table
def write_html_report(out_path, date_range=None, desired_ccaas=None,
spa_report=False):
if spa_report and desired_ccaas:
raise ValueError('choose one, either spa or ccaa report')
if desired_ccaas and len(desired_ccaas) == 1:
only_one_ccaa = True
ccaa_iso = convert_to_ccaa_iso(desired_ccaas[0])
else:
only_one_ccaa = False
ccaa_info = data_sources.get_sorted_downloaded_ccaa_info()
report = ccaa_info[-1]
accumulaed_incidence = calc_accumulated_indicende_per_ccaa(report)
deaths = sorted(ministry_datasources.read_deceased_excel_ministry_files
(), key=lambda x: x['max_date'])[-1]
if spa_report:
accumulated_incidence_table, dates = (
_create_accumulate_indicence_table_for_spa_chart_from_report(
report, 15))
else:
accumulated_incidence_table, ccaas, dates = (
_create_table_for_chart_from_dict(accumulaed_incidence,
desired_ccaas))
title = 'Resumen situación Covid-19'
if spa_report:
title += ' España'
elif only_one_ccaa:
title += ': ' + data_sources.convert_to_ccaa_name(ccaa_iso)
else:
title += ' por comunidad autónoma'
html = HEADER.format(title)
html += HEADER2
js_function_name = 'drawAccumulatedCasesIncidence'
columns = [('date', 'fecha')]
if spa_report:
columns.extend([('number', 'España')])
else:
columns.extend([('number', data_sources.convert_to_ccaa_name(ccaa)) for
ccaa in ccaas if is_desired_ccaa(ccaa, desired_ccaas)])
title = 'Incidencia acumulada por 100.000 hab. (15 días)'
width = 900
height = 800
rangeslider_height = 50
js_sizes = {'dashboard': {'height': height + rangeslider_height,
from datetime import date
import config
import datetime
import numpy
import pandas
import data_sources
from data_sources import POPULATION, convert_to_ccaa_iso
import material_line_chart
import ministry_datasources
HEADER = '''<html>
<head>
<title>{}</title>
<script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
<script type="text/javascript">
'''
HEADER2 = '''
google.charts.load('current', {'packages':['line', 'corechart', 'controls']});
'''
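# HEADER and HEADER2 form the fixed head of the generated page: HEADER takes the
# report title and pulls in the Google Charts loader, HEADER2 requests the chart
# packages used below (line, corechart, controls).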
DESCRIPTIONS_CCAA = {
'incidencia_acumulada': 'Número de casos informados en los 15 días anteriores por cien mil habitantes. Datos obtenidos de los informes del Carlos III.',
'hospitalized': 'Número medio de hospitalizaciones por cien mil habitantes (media de 7 días). Datos obtenidos a partir de las cifras acumuladas que aparecen en los informes diarios del ministerio.',
'deceased': 'Número medio de fallecidos por cien mil habitantes (media de 7 días). Datos obtenidos a partir del excel con datos de fallecidos diarios del ministerio.',
}
DESCRIPTIONS_SPA = {
'incidencia_acumulada': 'Número de casos informados en los 15 días anteriores por cien mil habitantes. Datos obtenidos de los informes del Carlos III.',
'hospitalized': 'Número medio de hospitalizaciones (media de 7 días). Datos obtenidos a partir de las cifras acumuladas que aparecen en los informes diarios del ministerio.',
'deceased': 'Número medio de fallecidos (media de 7 días). Datos obtenidos a partir del excel con datos de fallecidos diarios del ministerio.',
}
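# Chart descriptions keyed by the spa_report flag: True selects the Spain-wide
# wording, False the per-CCAA wording normalised per 100,000 inhabitants.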
DESCRIPTIONS = {True: DESCRIPTIONS_SPA, False: DESCRIPTIONS_CCAA}
def calc_accumulated_indicende_per_ccaa(report, num_days=15):
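    """Return a {ccaa: pandas.Series} with the trailing num_days case count per 100,000 inhabitants.

    For each CCAA the daily 'num_casos' values are summed over a sliding num_days
    window (windows with fewer than num_days data points are skipped) and the sum
    is normalised by the CCAA population from data_sources.POPULATION.
    """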
ccaas = data_sources.get_ccaas_in_dset(report)
dframe = report['dframe']
num_cases = dframe['num_casos']
ccaa_column = data_sources.get_ccaa_column_in_index(num_cases.index)
index = num_cases.index.to_frame(index=False)
time_delta = numpy.timedelta64(num_days, 'D')
accumulated_cases_by_ccaa = {}
for ccaa in ccaas:
mask = index[ccaa_column] == ccaa
mask = mask.values
num_cases_for_this_ccaa = num_cases[mask]
this_ccaa_index = num_cases_for_this_ccaa.index.to_frame(index=False)
this_ccaa_dates = this_ccaa_index['fecha']
num_accumulated_cases = []
valid_dates = []
for date in this_ccaa_dates:
date0 = date - time_delta
mask = numpy.logical_and(this_ccaa_dates > date0,
this_ccaa_dates <= date)
mask = mask.values
if numpy.sum(mask) < num_days:
continue
num_accumulated_cases.append(numpy.sum(num_cases_for_this_ccaa[mask]))
valid_dates.append(date)
num_accumulated_cases = pandas.Series(num_accumulated_cases, index=valid_dates)
num_accumulated_cases = num_accumulated_cases / data_sources.POPULATION[ccaa] * 1e5
accumulated_cases_by_ccaa[ccaa] = num_accumulated_cases
return accumulated_cases_by_ccaa
def _create_js_chart(dframe, date_range, js_function_name, div_id, title, width, height):
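    """Build the Google Charts JS for a per-CCAA line chart.

    dframe has one row per CCAA and one column per date; the optional date_range
    restricts the plotted dates. Returns the JS snippet produced by
    material_line_chart.create_chart_js.
    """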
table = []
ccaas = sorted(dframe.index)
dates = list(dframe.columns)
if date_range is not None:
dates = [date for date in dates if date > date_range[0] and date <= date_range[1]]
columns = [('date', 'fecha')]
columns.extend([('number', data_sources.convert_to_ccaa_name(ccaa)) for ccaa in ccaas])
for date in dates:
row = [date.date()]
for ccaa in ccaas:
value = dframe.loc[ccaa, date]
row.append(value)
table.append(row)
js_function_name = js_function_name
html = material_line_chart.create_chart_js(js_function_name, div_id, title,
columns, table,
width=width, height=height)
return html
def _write_table_from_series(series):
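    """Render a pandas Series as a two-column HTML table (index, value)."""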
html = '<table>'
for index, value in zip(series.index, series.values):
html += f'<tr><td>{index}</td><td>{value}</td></tr>\n'
html += '</table>'
return html
def is_desired_ccaa(ccaa, desired_ccaas):
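    """Return True when no CCAA filter is given or the CCAA's ISO code is in desired_ccaas."""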
return desired_ccaas is None or data_sources.convert_to_ccaa_iso(ccaa) in desired_ccaas
def _create_table_for_chart_from_dict(dict_data, desired_ccaas):
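    """Convert a {ccaa: Series} dict into chart rows [date, value per CCAA].

    The dates are taken from the first series, so all series are expected to share
    the same index. Returns (table, ccaas, dates).
    """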
one_data = list(dict_data.values())[0]
ccaas = sorted(dict_data.keys())
ccaas = [ccaa for ccaa in ccaas if is_desired_ccaa(ccaa, desired_ccaas)]
dates = list(one_data.index)
table = []
for date in dates:
row = [date.date()]
for ccaa in ccaas:
row.append(dict_data[ccaa][date])
table.append(row)
return table, ccaas, dates
def _create_accumulate_indicence_table_for_spa_chart_from_report(report, num_days):
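    """Spain-wide accumulated incidence: trailing num_days cases per 100,000 inhabitants.

    Cases are summed across all CCAAs for each date (the date is assumed to be the
    second index level) and normalised by the total population. Returns (table, dates).
    """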
dframe = report['dframe']
time_delta = numpy.timedelta64(num_days, 'D')
num_cases = dframe.groupby(level=1).sum().loc[:, 'num_casos']
tot_pop = sum(data_sources.POPULATION.values())
dates = numpy.array(num_cases.index)
num_accumulated_cases = []
valid_dates = []
for date in dates:
date0 = date - time_delta
mask = numpy.logical_and(dates > date0,
dates <= date)
if numpy.sum(mask) < num_days:
continue
num_accumulated_cases.append(numpy.sum(num_cases[mask]) / tot_pop * 1e5)
date = datetime.datetime.fromtimestamp(date.astype('O') / 1e9)
valid_dates.append(date)
table = [(date.date(), cases) for date, cases in zip(valid_dates, num_accumulated_cases)]
dates = valid_dates
return table, dates
def _create_table_for_chart_from_dframe(dframe, desired_ccaas):
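    """Like _create_table_for_chart_from_dict, but reads from a CCAA-by-date DataFrame."""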
ccaas = sorted(dframe.index)
ccaas = [ccaa for ccaa in ccaas if is_desired_ccaa(ccaa, desired_ccaas)]
dates = list(dframe.columns)
table = []
for date in dates:
row = [date.date()]
for ccaa in ccaas:
row.append(dframe.loc[ccaa, date])
table.append(row)
return table, ccaas, dates
def _create_table_for_chart_from_series(series):
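    """Turn a date-indexed Series into a list of (date, value) rows."""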
table = [(date.date(), value) for date, value in zip(series.index, series.values)]
return table
def write_html_report(out_path, date_range=None, desired_ccaas=None, spa_report=False):
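    """Assemble the HTML report and write it to out_path.

    The page contains the accumulated-incidence chart with a date range slider, the
    7-day rolling mean of deaths (Spain-wide, or per CCAA per 100,000 inhabitants)
    and the total and relative death figures. The hospitalisation/ICU blocks are
    currently disabled behind 'if False:' guards.
    """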
if spa_report and desired_ccaas:
raise ValueError('choose one, either spa or ccaa report')
if desired_ccaas and len(desired_ccaas) == 1:
only_one_ccaa = True
ccaa_iso = convert_to_ccaa_iso(desired_ccaas[0])
else:
only_one_ccaa = False
ccaa_info = data_sources.get_sorted_downloaded_ccaa_info()
report = ccaa_info[-1]
    accumulated_incidence = calc_accumulated_indicende_per_ccaa(report)
deaths = sorted(ministry_datasources.read_deceased_excel_ministry_files(),
key=lambda x: x['max_date'])[-1]
if spa_report:
accumulated_incidence_table, dates = _create_accumulate_indicence_table_for_spa_chart_from_report(report, 15)
else:
        accumulated_incidence_table, ccaas, dates = _create_table_for_chart_from_dict(accumulated_incidence, desired_ccaas)
title = 'Resumen situación Covid-19'
if spa_report:
title += ' España'
elif only_one_ccaa:
title += ': ' + data_sources.convert_to_ccaa_name(ccaa_iso)
else:
title += ' por comunidad autónoma'
html = HEADER.format(title)
html += HEADER2
js_function_name = 'drawAccumulatedCasesIncidence'
columns = [('date', 'fecha')]
if spa_report:
columns.extend([('number', 'España')])
else:
columns.extend([('number', data_sources.convert_to_ccaa_name(ccaa)) for ccaa in ccaas if is_desired_ccaa(ccaa, desired_ccaas)])
title = 'Incidencia acumulada por 100.000 hab. (15 días)'
    width = 900
height = 800
rangeslider_height = 50
js_sizes = {'dashboard': {'height': height + rangeslider_height, 'width': width},
'chart': {'height': height, 'width': width},
'rangeslider': {'height': rangeslider_height, 'width': 600},
}
div_sizes = {}
for html_element in js_sizes:
div_sizes[html_element] = {}
div_sizes[html_element]['height'] = f"{js_sizes[html_element]['height']}px"
div_sizes[html_element]['width'] = f"{js_sizes[html_element]['width']}px"
slider_config = {'column_controlled': 'fecha',
'min_value': dates[0],
'max_value': dates[-1],
'min_init_value': date_range[0],
'max_init_value': date_range[-1]}
div_ids_accumulated_cases = {'dashboard': 'accumulated_cases_dashboard',
'chart': 'accumulated_cases_chart',
'rangeslider': 'accumulated_cases_rangeslider'}
html += material_line_chart.create_chart_js_with_slider(js_function_name,
slider_config,
div_ids_accumulated_cases,
title,
columns,
accumulated_incidence_table,
sizes=js_sizes)
js_function_names = {'hospitalized': 'drawHospitalized',
'icu': 'drawICU',
'deceased': 'drawDeceased'}
div_ids = {'hospitalized': 'hospitalized_chart',
'icu': 'icu_chart',
'deceased': 'deceased_chart'
}
titles = {'hospitalized': 'Num. hospitalizaciones por 100.000 hab. (media 7 días)',
'icu': 'Num. ingresos UCI por 100.000 hab. (media 7 días)',
'deceased': 'Num. fallecidos por 100.000 hab. (media 7 días)'
}
if False:
if spa_report:
rolling_means = ministry_datasources.get_ministry_rolling_mean_spa()
titles = {'hospitalized': 'Num. hospitalizaciones. (media 7 días)',
'icu': 'Num. ingresos UCI. (media 7 días)',
'deceased': 'Num. fallecidos. (media 7 días)'
}
else:
rolling_means = ministry_datasources.get_ministry_rolling_mean()
titles = {'hospitalized': 'Num. hospitalizaciones por 100.000 hab. (media 7 días)',
'icu': 'Num. ingresos UCI por 100.000 hab. (media 7 días)',
'deceased': 'Num. fallecidos por 100.000 hab. (media 7 días)'
}
div_ids_hospitalized = {'dashboard': 'hospitalized_dashboard',
'chart': 'hospitalized_chart',
'rangeslider': 'hospitalized_rangeslider'}
div_ids_deceased = {'dashboard': 'deceased_dashboard',
'chart': 'deceased_chart',
'rangeslider': 'deceased_rangeslider'}
div_ids = {'hospitalized': div_ids_hospitalized,
'deceased': div_ids_deceased,
}
if False:
dframe = rolling_means['hospitalized']
if spa_report:
columns = [('date', 'fecha'), ('number', 'España')]
table = _create_table_for_chart_from_series(dframe)
else:
populations = [data_sources.get_population(ccaa) for ccaa in dframe.index]
dframe = dframe.divide(populations, axis=0) * 1e5
table, ccaas, _ = _create_table_for_chart_from_dframe(dframe, desired_ccaas)
columns = [('date', 'fecha')]
columns.extend([('number', data_sources.convert_to_ccaa_name(ccaa)) for ccaa in ccaas])
key = 'hospitalized'
hospitalized_slider_config = {'column_controlled': 'fecha',
'min_value': dates[0],
'max_value': dates[-1],
'min_init_value': date_range[0],
'max_init_value': datetime.datetime.now()}
html += material_line_chart.create_chart_js_with_slider(js_function_names[key],
hospitalized_slider_config,
div_ids[key],
title=titles[key],
columns=columns,
data_table=table,
sizes=js_sizes)
num_days = 7
key = 'deceased'
deaths_dframe = deaths['dframe']
if spa_report:
spa_deaths = deaths_dframe.sum(axis=0)
deaths_rolling_mean = spa_deaths.rolling(num_days, center=True, min_periods=num_days).mean().dropna()
table = _create_table_for_chart_from_series(deaths_rolling_mean)
columns = [('date', 'fecha'), ('number', 'España')]
else:
deaths_rolling_mean = deaths_dframe.rolling(num_days, center=True, min_periods=num_days, axis=1).mean()
deaths_rolling_mean = deaths_rolling_mean.dropna(axis=1, how='all')
populations = [data_sources.get_population(ccaa) for ccaa in deaths_rolling_mean.index]
deaths_rolling_mean = deaths_rolling_mean.divide(populations, axis=0) * 1e5
table, ccaas, _ = _create_table_for_chart_from_dframe(deaths_rolling_mean, desired_ccaas)
columns = [('date', 'fecha')]
columns.extend([('number', data_sources.convert_to_ccaa_name(ccaa)) for ccaa in ccaas])
html += material_line_chart.create_chart_js_with_slider(js_function_names[key],
slider_config,
div_ids[key],
title=titles[key],
columns=columns,
data_table=table,
sizes=js_sizes)
html += ' </script>\n </head>\n <body>\n'
today = datetime.datetime.now()
html += '<p><a href="../">Menu</a></p>'
html += f'<p>Informe generado el día: {today.day}-{today.month}-{today.year}</p>'
    html += '<p>Este informe está generado para uso personal por <a href="https://twitter.com/jblanca42">@jblanca42</a>, pero lo sube a la web por si le pudiese ser de utilidad a alguien más.</p>'
    html += '<p>El código utilizado para generarlo se encuentra en <a href="https://github.com/JoseBlanca/seguimiento_covid">github</a>, si encuentras algún fallo o quieres mejorar algo envía un mensaje o haz un pull request.</p>'
if desired_ccaas:
index = [ccaa for ccaa in deaths['dframe'].index if is_desired_ccaa(ccaa, desired_ccaas)]
tot_deaths = deaths['dframe'].loc[index, :].values.sum()
else:
tot_deaths = deaths['dframe'].values.sum() + deaths['unassinged_deaths']
html += f'<p>Número total de fallecidos: {tot_deaths}</p>'
if spa_report:
death_rate = round(sum(data_sources.POPULATION.values()) / tot_deaths)
html += f'<p>Una de cada {death_rate} personas han fallecido.</p>'
elif desired_ccaas and len(desired_ccaas) == 1:
death_rate = round(data_sources.get_population(desired_ccaas[0]) / tot_deaths)
html += f'<p>Una de cada {death_rate} personas han fallecido en esta comunidad autónoma.</p>'
else:
deaths_per_ccaa = deaths['dframe'].sum(axis=1)
populations = [data_sources.get_population(ccaa) for ccaa in deaths_per_ccaa.index]
populations = pandas.Series(populations, index=deaths_per_ccaa.index)
death_rate = (populations / deaths_per_ccaa).round().sort_values().astype(int)
html += '<p>¿Una de cada cuántas personas han fallecido por comunidad autónoma?</p>'
html += _write_table_from_series(death_rate)
if False:
for key in ['hospitalized']:
html += f"<p>{DESCRIPTIONS[spa_report][key]}</p>\n"
html += material_line_chart.create_chart_with_slider_divs(div_ids[key],
sizes=div_sizes)
html += f"<p>{DESCRIPTIONS[spa_report]['incidencia_acumulada']}</p>\n"
html += material_line_chart.create_chart_with_slider_divs(div_ids_accumulated_cases,
sizes=div_sizes)
for key in ['deceased']:
html += f"<p>{DESCRIPTIONS[spa_report][key]}</p>\n"
html += material_line_chart.create_chart_with_slider_divs(div_ids[key],
sizes=div_sizes)
html += ' </body>\n</html>'
out_path.open('wt').write(html)
if __name__ == '__main__':
ten_days_ago = datetime.datetime.now() - datetime.timedelta(days=10)
forty_days_ago = datetime.datetime.now() - datetime.timedelta(days=40)
first_date = datetime.datetime(2020, 9, 1)
out_dir = config.HTML_REPORTS_DIR
out_dir.mkdir(exist_ok=True)
out_path = out_dir / 'situacion_covid_por_ca.html'
write_html_report(out_path, date_range=[forty_days_ago, ten_days_ago])
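    # Alternative invocations, left as illustrative comments (the ISO code below is
    # only an example and must exist in the data_sources tables):
    #   write_html_report(out_dir / 'situacion_covid_espana.html',
    #                     date_range=[forty_days_ago, ten_days_ago], spa_report=True)
    #   write_html_report(out_dir / 'situacion_covid_madrid.html',
    #                     date_range=[forty_days_ago, ten_days_ago], desired_ccaas=['MD'])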
(media 7 días)'}\n if False:\n if spa_report:\n rolling_means = ministry_datasources.get_ministry_rolling_mean_spa(\n )\n titles = {'hospitalized':\n 'Num. hospitalizaciones. (media 7 días)', 'icu':\n 'Num. ingresos UCI. (media 7 días)', 'deceased':\n 'Num. fallecidos. (media 7 días)'}\n else:\n rolling_means = ministry_datasources.get_ministry_rolling_mean()\n titles = {'hospitalized':\n 'Num. hospitalizaciones por 100.000 hab. (media 7 días)',\n 'icu': 'Num. ingresos UCI por 100.000 hab. (media 7 días)',\n 'deceased': 'Num. fallecidos por 100.000 hab. (media 7 días)'}\n div_ids_hospitalized = {'dashboard': 'hospitalized_dashboard', 'chart':\n 'hospitalized_chart', 'rangeslider': 'hospitalized_rangeslider'}\n div_ids_deceased = {'dashboard': 'deceased_dashboard', 'chart':\n 'deceased_chart', 'rangeslider': 'deceased_rangeslider'}\n div_ids = {'hospitalized': div_ids_hospitalized, 'deceased':\n div_ids_deceased}\n if False:\n dframe = rolling_means['hospitalized']\n if spa_report:\n columns = [('date', 'fecha'), ('number', 'España')]\n table = _create_table_for_chart_from_series(dframe)\n else:\n populations = [data_sources.get_population(ccaa) for ccaa in\n dframe.index]\n dframe = dframe.divide(populations, axis=0) * 100000.0\n table, ccaas, _ = _create_table_for_chart_from_dframe(dframe,\n desired_ccaas)\n columns = [('date', 'fecha')]\n columns.extend([('number', data_sources.convert_to_ccaa_name(\n ccaa)) for ccaa in ccaas])\n key = 'hospitalized'\n hospitalized_slider_config = {'column_controlled': 'fecha',\n 'min_value': dates[0], 'max_value': dates[-1], 'min_init_value':\n date_range[0], 'max_init_value': datetime.datetime.now()}\n html += material_line_chart.create_chart_js_with_slider(\n js_function_names[key], hospitalized_slider_config, div_ids[key\n ], title=titles[key], columns=columns, data_table=table, sizes=\n js_sizes)\n num_days = 7\n key = 'deceased'\n deaths_dframe = deaths['dframe']\n if spa_report:\n spa_deaths = deaths_dframe.sum(axis=0)\n deaths_rolling_mean = spa_deaths.rolling(num_days, center=True,\n min_periods=num_days).mean().dropna()\n table = _create_table_for_chart_from_series(deaths_rolling_mean)\n columns = [('date', 'fecha'), ('number', 'España')]\n else:\n deaths_rolling_mean = deaths_dframe.rolling(num_days, center=True,\n min_periods=num_days, axis=1).mean()\n deaths_rolling_mean = deaths_rolling_mean.dropna(axis=1, how='all')\n populations = [data_sources.get_population(ccaa) for ccaa in\n deaths_rolling_mean.index]\n deaths_rolling_mean = deaths_rolling_mean.divide(populations, axis=0\n ) * 100000.0\n table, ccaas, _ = _create_table_for_chart_from_dframe(\n deaths_rolling_mean, desired_ccaas)\n columns = [('date', 'fecha')]\n columns.extend([('number', data_sources.convert_to_ccaa_name(ccaa)) for\n ccaa in ccaas])\n html += material_line_chart.create_chart_js_with_slider(js_function_names\n [key], slider_config, div_ids[key], title=titles[key], columns=\n columns, data_table=table, sizes=js_sizes)\n html += ' </script>\\n </head>\\n <body>\\n'\n today = datetime.datetime.now()\n html += '<p><a href=\"../\">Menu</a></p>'\n html += (\n f'<p>Informe generado el día: {today.day}-{today.month}-{today.year}</p>'\n )\n html += (\n f'<p>Este informe está generado para uso personal por <a href=\"https://twitter.com/jblanca42\">@jblanca42</a>, pero lo sube a la web por si le pudiese ser de utilidad a alguien más.</p>'\n )\n html += (\n f'<p>El código utilizado para generarlo se encuentra en <a 
href=\"https://github.com/JoseBlanca/seguimiento_covid\">github</a>, si encuentras algún fallo o quieres mejorar algo envía un mensaje o haz un pull request.</p>'\n )\n if desired_ccaas:\n index = [ccaa for ccaa in deaths['dframe'].index if is_desired_ccaa\n (ccaa, desired_ccaas)]\n tot_deaths = deaths['dframe'].loc[index, :].values.sum()\n else:\n tot_deaths = deaths['dframe'].values.sum() + deaths['unassinged_deaths'\n ]\n html += f'<p>Número total de fallecidos: {tot_deaths}</p>'\n if spa_report:\n death_rate = round(sum(data_sources.POPULATION.values()) / tot_deaths)\n html += f'<p>Una de cada {death_rate} personas han fallecido.</p>'\n elif desired_ccaas and len(desired_ccaas) == 1:\n death_rate = round(data_sources.get_population(desired_ccaas[0]) /\n tot_deaths)\n html += (\n f'<p>Una de cada {death_rate} personas han fallecido en esta comunidad autónoma.</p>'\n )\n else:\n deaths_per_ccaa = deaths['dframe'].sum(axis=1)\n populations = [data_sources.get_population(ccaa) for ccaa in\n deaths_per_ccaa.index]\n populations = pandas.Series(populations, index=deaths_per_ccaa.index)\n death_rate = (populations / deaths_per_ccaa).round().sort_values(\n ).astype(int)\n html += (\n '<p>¿Una de cada cuántas personas han fallecido por comunidad autónoma?</p>'\n )\n html += _write_table_from_series(death_rate)\n if False:\n for key in ['hospitalized']:\n html += f'<p>{DESCRIPTIONS[spa_report][key]}</p>\\n'\n html += material_line_chart.create_chart_with_slider_divs(div_ids\n [key], sizes=div_sizes)\n html += f\"<p>{DESCRIPTIONS[spa_report]['incidencia_acumulada']}</p>\\n\"\n html += material_line_chart.create_chart_with_slider_divs(\n div_ids_accumulated_cases, sizes=div_sizes)\n for key in ['deceased']:\n html += f'<p>{DESCRIPTIONS[spa_report][key]}</p>\\n'\n html += material_line_chart.create_chart_with_slider_divs(div_ids[\n key], sizes=div_sizes)\n html += ' </body>\\n</html>'\n out_path.open('wt').write(html)\n\n\nif __name__ == '__main__':\n ten_days_ago = datetime.datetime.now() - datetime.timedelta(days=10)\n forty_days_ago = datetime.datetime.now() - datetime.timedelta(days=40)\n first_date = datetime.datetime(2020, 9, 1)\n out_dir = config.HTML_REPORTS_DIR\n out_dir.mkdir(exist_ok=True)\n out_path = out_dir / 'situacion_covid_por_ca.html'\n write_html_report(out_path, date_range=[forty_days_ago, ten_days_ago])\n",
"step-4": "<mask token>\nHEADER = \"\"\"<html>\n <head>\n <title>{}</title>\n <script type=\"text/javascript\" src=\"https://www.gstatic.com/charts/loader.js\"></script>\n <script type=\"text/javascript\">\n\"\"\"\nHEADER2 = \"\"\"\n google.charts.load('current', {'packages':['line', 'corechart', 'controls']});\n\n\"\"\"\nDESCRIPTIONS_CCAA = {'incidencia_acumulada':\n 'Número de casos informados en los 15 días anteriores por cien mil habitantes. Datos obtenidos de los informes del Carlos III.'\n , 'hospitalized':\n 'Número medio de hospitalizaciones por cien mil habitantes (media de 7 días). Datos obtenidos a partir de las cifras acumuladas que aparecen en los informes diarios del ministerio.'\n , 'deceased':\n 'Número medio de fallecidos por cien mil habitantes (media de 7 días). Datos obtenidos a partir del excel con datos de fallecidos diarios del ministerio.'\n }\nDESCRIPTIONS_SPA = {'incidencia_acumulada':\n 'Número de casos informados en los 15 días anteriores por cien mil habitantes. Datos obtenidos de los informes del Carlos III.'\n , 'hospitalized':\n 'Número medio de hospitalizaciones (media de 7 días). Datos obtenidos a partir de las cifras acumuladas que aparecen en los informes diarios del ministerio.'\n , 'deceased':\n 'Número medio de fallecidos (media de 7 días). Datos obtenidos a partir del excel con datos de fallecidos diarios del ministerio.'\n }\nDESCRIPTIONS = {(True): DESCRIPTIONS_SPA, (False): DESCRIPTIONS_CCAA}\n\n\ndef calc_accumulated_indicende_per_ccaa(report, num_days=15):\n ccaas = data_sources.get_ccaas_in_dset(report)\n dframe = report['dframe']\n num_cases = dframe['num_casos']\n ccaa_column = data_sources.get_ccaa_column_in_index(num_cases.index)\n index = num_cases.index.to_frame(index=False)\n time_delta = numpy.timedelta64(num_days, 'D')\n accumulated_cases_by_ccaa = {}\n for ccaa in ccaas:\n mask = index[ccaa_column] == ccaa\n mask = mask.values\n num_cases_for_this_ccaa = num_cases[mask]\n this_ccaa_index = num_cases_for_this_ccaa.index.to_frame(index=False)\n this_ccaa_dates = this_ccaa_index['fecha']\n num_accumulated_cases = []\n valid_dates = []\n for date in this_ccaa_dates:\n date0 = date - time_delta\n mask = numpy.logical_and(this_ccaa_dates > date0, \n this_ccaa_dates <= date)\n mask = mask.values\n if numpy.sum(mask) < num_days:\n continue\n num_accumulated_cases.append(numpy.sum(num_cases_for_this_ccaa[\n mask]))\n valid_dates.append(date)\n num_accumulated_cases = pandas.Series(num_accumulated_cases, index=\n valid_dates)\n num_accumulated_cases = (num_accumulated_cases / data_sources.\n POPULATION[ccaa] * 100000.0)\n accumulated_cases_by_ccaa[ccaa] = num_accumulated_cases\n return accumulated_cases_by_ccaa\n\n\ndef _create_js_chart(dframe, date_range, js_function_name, div_id, title,\n width, height):\n table = []\n ccaas = sorted(dframe.index)\n dates = list(dframe.columns)\n if date_range is not None:\n dates = [date for date in dates if date > date_range[0] and date <=\n date_range[1]]\n columns = [('date', 'fecha')]\n columns.extend([('number', data_sources.convert_to_ccaa_name(ccaa)) for\n ccaa in ccaas])\n for date in dates:\n row = [date.date()]\n for ccaa in ccaas:\n value = dframe.loc[ccaa, date]\n row.append(value)\n table.append(row)\n js_function_name = js_function_name\n html = material_line_chart.create_chart_js(js_function_name, div_id,\n title, columns, table, width=width, height=height)\n return html\n\n\ndef _write_table_from_series(series):\n html = '<table>'\n for index, value in zip(series.index, series.values):\n html 
+= f'<tr><td>{index}</td><td>{value}</td></tr>\\n'\n html += '</table>'\n return html\n\n\ndef is_desired_ccaa(ccaa, desired_ccaas):\n return desired_ccaas is None or data_sources.convert_to_ccaa_iso(ccaa\n ) in desired_ccaas\n\n\ndef _create_table_for_chart_from_dict(dict_data, desired_ccaas):\n one_data = list(dict_data.values())[0]\n ccaas = sorted(dict_data.keys())\n ccaas = [ccaa for ccaa in ccaas if is_desired_ccaa(ccaa, desired_ccaas)]\n dates = list(one_data.index)\n table = []\n for date in dates:\n row = [date.date()]\n for ccaa in ccaas:\n row.append(dict_data[ccaa][date])\n table.append(row)\n return table, ccaas, dates\n\n\ndef _create_accumulate_indicence_table_for_spa_chart_from_report(report,\n num_days):\n dframe = report['dframe']\n time_delta = numpy.timedelta64(num_days, 'D')\n num_cases = dframe.groupby(level=1).sum().loc[:, 'num_casos']\n tot_pop = sum(data_sources.POPULATION.values())\n dates = numpy.array(num_cases.index)\n num_accumulated_cases = []\n valid_dates = []\n for date in dates:\n date0 = date - time_delta\n mask = numpy.logical_and(dates > date0, dates <= date)\n if numpy.sum(mask) < num_days:\n continue\n num_accumulated_cases.append(numpy.sum(num_cases[mask]) / tot_pop *\n 100000.0)\n date = datetime.datetime.fromtimestamp(date.astype('O') / 1000000000.0)\n valid_dates.append(date)\n table = [(date.date(), cases) for date, cases in zip(valid_dates,\n num_accumulated_cases)]\n dates = valid_dates\n return table, dates\n\n\ndef _create_table_for_chart_from_dframe(dframe, desired_ccaas):\n ccaas = sorted(dframe.index)\n ccaas = [ccaa for ccaa in ccaas if is_desired_ccaa(ccaa, desired_ccaas)]\n dates = list(dframe.columns)\n table = []\n for date in dates:\n row = [date.date()]\n for ccaa in ccaas:\n row.append(dframe.loc[ccaa, date])\n table.append(row)\n return table, ccaas, dates\n\n\ndef _create_table_for_chart_from_series(series):\n table = [(date.date(), value) for date, value in zip(series.index,\n series.values)]\n return table\n\n\ndef write_html_report(out_path, date_range=None, desired_ccaas=None,\n spa_report=False):\n if spa_report and desired_ccaas:\n raise ValueError('choose one, either spa or ccaa report')\n if desired_ccaas and len(desired_ccaas) == 1:\n only_one_ccaa = True\n ccaa_iso = convert_to_ccaa_iso(desired_ccaas[0])\n else:\n only_one_ccaa = False\n ccaa_info = data_sources.get_sorted_downloaded_ccaa_info()\n report = ccaa_info[-1]\n accumulaed_incidence = calc_accumulated_indicende_per_ccaa(report)\n deaths = sorted(ministry_datasources.read_deceased_excel_ministry_files\n (), key=lambda x: x['max_date'])[-1]\n if spa_report:\n accumulated_incidence_table, dates = (\n _create_accumulate_indicence_table_for_spa_chart_from_report(\n report, 15))\n else:\n accumulated_incidence_table, ccaas, dates = (\n _create_table_for_chart_from_dict(accumulaed_incidence,\n desired_ccaas))\n title = 'Resumen situación Covid-19'\n if spa_report:\n title += ' España'\n elif only_one_ccaa:\n title += ': ' + data_sources.convert_to_ccaa_name(ccaa_iso)\n else:\n title += ' por comunidad autónoma'\n html = HEADER.format(title)\n html += HEADER2\n js_function_name = 'drawAccumulatedCasesIncidence'\n columns = [('date', 'fecha')]\n if spa_report:\n columns.extend([('number', 'España')])\n else:\n columns.extend([('number', data_sources.convert_to_ccaa_name(ccaa)) for\n ccaa in ccaas if is_desired_ccaa(ccaa, desired_ccaas)])\n title = 'Incidencia acumulada por 100.000 hab. 
(15 días)'\n width = 900\n height = 800\n rangeslider_height = 50\n js_sizes = {'dashboard': {'height': height + rangeslider_height,\n 'width': width}, 'chart': {'height': height, 'width': width},\n 'rangeslider': {'height': rangeslider_height, 'width': 600}}\n div_sizes = {}\n for html_element in js_sizes:\n div_sizes[html_element] = {}\n div_sizes[html_element]['height'\n ] = f\"{js_sizes[html_element]['height']}px\"\n div_sizes[html_element]['width'\n ] = f\"{js_sizes[html_element]['width']}px\"\n slider_config = {'column_controlled': 'fecha', 'min_value': dates[0],\n 'max_value': dates[-1], 'min_init_value': date_range[0],\n 'max_init_value': date_range[-1]}\n div_ids_accumulated_cases = {'dashboard': 'accumulated_cases_dashboard',\n 'chart': 'accumulated_cases_chart', 'rangeslider':\n 'accumulated_cases_rangeslider'}\n html += material_line_chart.create_chart_js_with_slider(js_function_name,\n slider_config, div_ids_accumulated_cases, title, columns,\n accumulated_incidence_table, sizes=js_sizes)\n js_function_names = {'hospitalized': 'drawHospitalized', 'icu':\n 'drawICU', 'deceased': 'drawDeceased'}\n div_ids = {'hospitalized': 'hospitalized_chart', 'icu': 'icu_chart',\n 'deceased': 'deceased_chart'}\n titles = {'hospitalized':\n 'Num. hospitalizaciones por 100.000 hab. (media 7 días)', 'icu':\n 'Num. ingresos UCI por 100.000 hab. (media 7 días)', 'deceased':\n 'Num. fallecidos por 100.000 hab. (media 7 días)'}\n if False:\n if spa_report:\n rolling_means = ministry_datasources.get_ministry_rolling_mean_spa(\n )\n titles = {'hospitalized':\n 'Num. hospitalizaciones. (media 7 días)', 'icu':\n 'Num. ingresos UCI. (media 7 días)', 'deceased':\n 'Num. fallecidos. (media 7 días)'}\n else:\n rolling_means = ministry_datasources.get_ministry_rolling_mean()\n titles = {'hospitalized':\n 'Num. hospitalizaciones por 100.000 hab. (media 7 días)',\n 'icu': 'Num. ingresos UCI por 100.000 hab. (media 7 días)',\n 'deceased': 'Num. fallecidos por 100.000 hab. 
(media 7 días)'}\n div_ids_hospitalized = {'dashboard': 'hospitalized_dashboard', 'chart':\n 'hospitalized_chart', 'rangeslider': 'hospitalized_rangeslider'}\n div_ids_deceased = {'dashboard': 'deceased_dashboard', 'chart':\n 'deceased_chart', 'rangeslider': 'deceased_rangeslider'}\n div_ids = {'hospitalized': div_ids_hospitalized, 'deceased':\n div_ids_deceased}\n if False:\n dframe = rolling_means['hospitalized']\n if spa_report:\n columns = [('date', 'fecha'), ('number', 'España')]\n table = _create_table_for_chart_from_series(dframe)\n else:\n populations = [data_sources.get_population(ccaa) for ccaa in\n dframe.index]\n dframe = dframe.divide(populations, axis=0) * 100000.0\n table, ccaas, _ = _create_table_for_chart_from_dframe(dframe,\n desired_ccaas)\n columns = [('date', 'fecha')]\n columns.extend([('number', data_sources.convert_to_ccaa_name(\n ccaa)) for ccaa in ccaas])\n key = 'hospitalized'\n hospitalized_slider_config = {'column_controlled': 'fecha',\n 'min_value': dates[0], 'max_value': dates[-1], 'min_init_value':\n date_range[0], 'max_init_value': datetime.datetime.now()}\n html += material_line_chart.create_chart_js_with_slider(\n js_function_names[key], hospitalized_slider_config, div_ids[key\n ], title=titles[key], columns=columns, data_table=table, sizes=\n js_sizes)\n num_days = 7\n key = 'deceased'\n deaths_dframe = deaths['dframe']\n if spa_report:\n spa_deaths = deaths_dframe.sum(axis=0)\n deaths_rolling_mean = spa_deaths.rolling(num_days, center=True,\n min_periods=num_days).mean().dropna()\n table = _create_table_for_chart_from_series(deaths_rolling_mean)\n columns = [('date', 'fecha'), ('number', 'España')]\n else:\n deaths_rolling_mean = deaths_dframe.rolling(num_days, center=True,\n min_periods=num_days, axis=1).mean()\n deaths_rolling_mean = deaths_rolling_mean.dropna(axis=1, how='all')\n populations = [data_sources.get_population(ccaa) for ccaa in\n deaths_rolling_mean.index]\n deaths_rolling_mean = deaths_rolling_mean.divide(populations, axis=0\n ) * 100000.0\n table, ccaas, _ = _create_table_for_chart_from_dframe(\n deaths_rolling_mean, desired_ccaas)\n columns = [('date', 'fecha')]\n columns.extend([('number', data_sources.convert_to_ccaa_name(ccaa)) for\n ccaa in ccaas])\n html += material_line_chart.create_chart_js_with_slider(js_function_names\n [key], slider_config, div_ids[key], title=titles[key], columns=\n columns, data_table=table, sizes=js_sizes)\n html += ' </script>\\n </head>\\n <body>\\n'\n today = datetime.datetime.now()\n html += '<p><a href=\"../\">Menu</a></p>'\n html += (\n f'<p>Informe generado el día: {today.day}-{today.month}-{today.year}</p>'\n )\n html += (\n f'<p>Este informe está generado para uso personal por <a href=\"https://twitter.com/jblanca42\">@jblanca42</a>, pero lo sube a la web por si le pudiese ser de utilidad a alguien más.</p>'\n )\n html += (\n f'<p>El código utilizado para generarlo se encuentra en <a href=\"https://github.com/JoseBlanca/seguimiento_covid\">github</a>, si encuentras algún fallo o quieres mejorar algo envía un mensaje o haz un pull request.</p>'\n )\n if desired_ccaas:\n index = [ccaa for ccaa in deaths['dframe'].index if is_desired_ccaa\n (ccaa, desired_ccaas)]\n tot_deaths = deaths['dframe'].loc[index, :].values.sum()\n else:\n tot_deaths = deaths['dframe'].values.sum() + deaths['unassinged_deaths'\n ]\n html += f'<p>Número total de fallecidos: {tot_deaths}</p>'\n if spa_report:\n death_rate = round(sum(data_sources.POPULATION.values()) / tot_deaths)\n html += f'<p>Una de cada {death_rate} 
personas han fallecido.</p>'\n elif desired_ccaas and len(desired_ccaas) == 1:\n death_rate = round(data_sources.get_population(desired_ccaas[0]) /\n tot_deaths)\n html += (\n f'<p>Una de cada {death_rate} personas han fallecido en esta comunidad autónoma.</p>'\n )\n else:\n deaths_per_ccaa = deaths['dframe'].sum(axis=1)\n populations = [data_sources.get_population(ccaa) for ccaa in\n deaths_per_ccaa.index]\n populations = pandas.Series(populations, index=deaths_per_ccaa.index)\n death_rate = (populations / deaths_per_ccaa).round().sort_values(\n ).astype(int)\n html += (\n '<p>¿Una de cada cuántas personas han fallecido por comunidad autónoma?</p>'\n )\n html += _write_table_from_series(death_rate)\n if False:\n for key in ['hospitalized']:\n html += f'<p>{DESCRIPTIONS[spa_report][key]}</p>\\n'\n html += material_line_chart.create_chart_with_slider_divs(div_ids\n [key], sizes=div_sizes)\n html += f\"<p>{DESCRIPTIONS[spa_report]['incidencia_acumulada']}</p>\\n\"\n html += material_line_chart.create_chart_with_slider_divs(\n div_ids_accumulated_cases, sizes=div_sizes)\n for key in ['deceased']:\n html += f'<p>{DESCRIPTIONS[spa_report][key]}</p>\\n'\n html += material_line_chart.create_chart_with_slider_divs(div_ids[\n key], sizes=div_sizes)\n html += ' </body>\\n</html>'\n out_path.open('wt').write(html)\n\n\nif __name__ == '__main__':\n ten_days_ago = datetime.datetime.now() - datetime.timedelta(days=10)\n forty_days_ago = datetime.datetime.now() - datetime.timedelta(days=40)\n first_date = datetime.datetime(2020, 9, 1)\n out_dir = config.HTML_REPORTS_DIR\n out_dir.mkdir(exist_ok=True)\n out_path = out_dir / 'situacion_covid_por_ca.html'\n write_html_report(out_path, date_range=[forty_days_ago, ten_days_ago])\n",
"step-5": "\nfrom datetime import date\nimport config\n\nimport datetime\n\nimport numpy\nimport pandas\n\nimport data_sources\nfrom data_sources import POPULATION, convert_to_ccaa_iso\nimport material_line_chart\nimport ministry_datasources\n\n\nHEADER = '''<html>\n <head>\n <title>{}</title>\n <script type=\"text/javascript\" src=\"https://www.gstatic.com/charts/loader.js\"></script>\n <script type=\"text/javascript\">\n'''\n\nHEADER2 = '''\n google.charts.load('current', {'packages':['line', 'corechart', 'controls']});\n\n'''\n\n\nDESCRIPTIONS_CCAA = {\n'incidencia_acumulada': 'Número de casos informados en los 15 días anteriores por cien mil habitantes. Datos obtenidos de los informes del Carlos III.',\n'hospitalized': 'Número medio de hospitalizaciones por cien mil habitantes (media de 7 días). Datos obtenidos a partir de las cifras acumuladas que aparecen en los informes diarios del ministerio.',\n'deceased': 'Número medio de fallecidos por cien mil habitantes (media de 7 días). Datos obtenidos a partir del excel con datos de fallecidos diarios del ministerio.',\n}\nDESCRIPTIONS_SPA = {\n'incidencia_acumulada': 'Número de casos informados en los 15 días anteriores por cien mil habitantes. Datos obtenidos de los informes del Carlos III.',\n'hospitalized': 'Número medio de hospitalizaciones (media de 7 días). Datos obtenidos a partir de las cifras acumuladas que aparecen en los informes diarios del ministerio.',\n'deceased': 'Número medio de fallecidos (media de 7 días). Datos obtenidos a partir del excel con datos de fallecidos diarios del ministerio.',\n}\nDESCRIPTIONS = {True: DESCRIPTIONS_SPA, False: DESCRIPTIONS_CCAA}\n\n\ndef calc_accumulated_indicende_per_ccaa(report, num_days=15):\n ccaas = data_sources.get_ccaas_in_dset(report)\n dframe = report['dframe']\n num_cases = dframe['num_casos'] \n ccaa_column = data_sources.get_ccaa_column_in_index(num_cases.index)\n index = num_cases.index.to_frame(index=False)\n\n time_delta = numpy.timedelta64(num_days, 'D')\n\n accumulated_cases_by_ccaa = {}\n for ccaa in ccaas:\n mask = index[ccaa_column] == ccaa\n mask = mask.values\n num_cases_for_this_ccaa = num_cases[mask]\n this_ccaa_index = num_cases_for_this_ccaa.index.to_frame(index=False)\n this_ccaa_dates = this_ccaa_index['fecha']\n num_accumulated_cases = []\n valid_dates = []\n for date in this_ccaa_dates:\n date0 = date - time_delta\n mask = numpy.logical_and(this_ccaa_dates > date0,\n this_ccaa_dates <= date)\n mask = mask.values\n if numpy.sum(mask) < num_days:\n continue\n num_accumulated_cases.append(numpy.sum(num_cases_for_this_ccaa[mask]))\n valid_dates.append(date)\n \n num_accumulated_cases = pandas.Series(num_accumulated_cases, index=valid_dates)\n num_accumulated_cases = num_accumulated_cases / data_sources.POPULATION[ccaa] * 1e5\n accumulated_cases_by_ccaa[ccaa] = num_accumulated_cases\n return accumulated_cases_by_ccaa\n\n\ndef _create_js_chart(dframe, date_range, js_function_name, div_id, title, width, height):\n table = []\n ccaas = sorted(dframe.index)\n dates = list(dframe.columns)\n\n if date_range is not None:\n dates = [date for date in dates if date > date_range[0] and date <= date_range[1]]\n\n columns = [('date', 'fecha')]\n columns.extend([('number', data_sources.convert_to_ccaa_name(ccaa)) for ccaa in ccaas])\n\n for date in dates:\n row = [date.date()]\n for ccaa in ccaas:\n value = dframe.loc[ccaa, date]\n row.append(value)\n table.append(row)\n js_function_name = js_function_name\n html = material_line_chart.create_chart_js(js_function_name, div_id, 
title,\n columns, table,\n width=width, height=height)\n return html\n\n\ndef _write_table_from_series(series):\n html = '<table>'\n for index, value in zip(series.index, series.values):\n html += f'<tr><td>{index}</td><td>{value}</td></tr>\\n'\n html += '</table>'\n return html\n\n\ndef is_desired_ccaa(ccaa, desired_ccaas):\n return desired_ccaas is None or data_sources.convert_to_ccaa_iso(ccaa) in desired_ccaas\n\n\ndef _create_table_for_chart_from_dict(dict_data, desired_ccaas):\n one_data = list(dict_data.values())[0]\n\n ccaas = sorted(dict_data.keys())\n ccaas = [ccaa for ccaa in ccaas if is_desired_ccaa(ccaa, desired_ccaas)]\n\n dates = list(one_data.index)\n table = []\n for date in dates:\n row = [date.date()]\n for ccaa in ccaas:\n row.append(dict_data[ccaa][date])\n table.append(row)\n return table, ccaas, dates\n\n\ndef _create_accumulate_indicence_table_for_spa_chart_from_report(report, num_days):\n dframe = report['dframe']\n time_delta = numpy.timedelta64(num_days, 'D')\n\n num_cases = dframe.groupby(level=1).sum().loc[:, 'num_casos']\n\n tot_pop = sum(data_sources.POPULATION.values())\n dates = numpy.array(num_cases.index)\n num_accumulated_cases = []\n valid_dates = []\n for date in dates:\n date0 = date - time_delta\n mask = numpy.logical_and(dates > date0,\n dates <= date)\n if numpy.sum(mask) < num_days:\n continue\n num_accumulated_cases.append(numpy.sum(num_cases[mask]) / tot_pop * 1e5)\n date = datetime.datetime.fromtimestamp(date.astype('O') / 1e9)\n valid_dates.append(date)\n\n table = [(date.date(), cases) for date, cases in zip(valid_dates, num_accumulated_cases)]\n dates = valid_dates\n\n return table, dates\n\n\ndef _create_table_for_chart_from_dframe(dframe, desired_ccaas):\n\n ccaas = sorted(dframe.index)\n ccaas = [ccaa for ccaa in ccaas if is_desired_ccaa(ccaa, desired_ccaas)]\n dates = list(dframe.columns)\n table = []\n for date in dates:\n row = [date.date()]\n for ccaa in ccaas:\n row.append(dframe.loc[ccaa, date])\n table.append(row)\n return table, ccaas, dates\n\n\ndef _create_table_for_chart_from_series(series):\n table = [(date.date(), value) for date, value in zip(series.index, series.values)]\n return table\n\n\ndef write_html_report(out_path, date_range=None, desired_ccaas=None, spa_report=False):\n\n if spa_report and desired_ccaas:\n raise ValueError('choose one, either spa or ccaa report')\n\n if desired_ccaas and len(desired_ccaas) == 1:\n only_one_ccaa = True\n ccaa_iso = convert_to_ccaa_iso(desired_ccaas[0])\n else:\n only_one_ccaa = False\n\n ccaa_info = data_sources.get_sorted_downloaded_ccaa_info()\n report = ccaa_info[-1]\n accumulaed_incidence = calc_accumulated_indicende_per_ccaa(report)\n\n deaths = sorted(ministry_datasources.read_deceased_excel_ministry_files(),\n key=lambda x: x['max_date'])[-1]\n\n if spa_report:\n accumulated_incidence_table, dates = _create_accumulate_indicence_table_for_spa_chart_from_report(report, 15)\n else:\n accumulated_incidence_table, ccaas, dates = _create_table_for_chart_from_dict(accumulaed_incidence, desired_ccaas)\n\n title = 'Resumen situación Covid-19'\n if spa_report:\n title += ' España'\n elif only_one_ccaa:\n title += ': ' + data_sources.convert_to_ccaa_name(ccaa_iso)\n else:\n title += ' por comunidad autónoma'\n html = HEADER.format(title)\n html += HEADER2\n\n js_function_name = 'drawAccumulatedCasesIncidence'\n columns = [('date', 'fecha')]\n if spa_report:\n columns.extend([('number', 'España')])\n else:\n columns.extend([('number', data_sources.convert_to_ccaa_name(ccaa)) for ccaa in 
ccaas if is_desired_ccaa(ccaa, desired_ccaas)])\n title = 'Incidencia acumulada por 100.000 hab. (15 días)'\n\n width =900\n height = 800\n rangeslider_height = 50\n js_sizes = {'dashboard': {'height': height + rangeslider_height, 'width': width},\n 'chart': {'height': height, 'width': width},\n 'rangeslider': {'height': rangeslider_height, 'width': 600},\n }\n div_sizes = {}\n for html_element in js_sizes:\n div_sizes[html_element] = {}\n div_sizes[html_element]['height'] = f\"{js_sizes[html_element]['height']}px\"\n div_sizes[html_element]['width'] = f\"{js_sizes[html_element]['width']}px\"\n\n slider_config = {'column_controlled': 'fecha',\n 'min_value': dates[0],\n 'max_value': dates[-1],\n 'min_init_value': date_range[0],\n 'max_init_value': date_range[-1]}\n div_ids_accumulated_cases = {'dashboard': 'accumulated_cases_dashboard',\n 'chart': 'accumulated_cases_chart',\n 'rangeslider': 'accumulated_cases_rangeslider'}\n\n html += material_line_chart.create_chart_js_with_slider(js_function_name,\n slider_config,\n div_ids_accumulated_cases,\n title,\n columns,\n accumulated_incidence_table,\n sizes=js_sizes)\n\n js_function_names = {'hospitalized': 'drawHospitalized',\n 'icu': 'drawICU',\n 'deceased': 'drawDeceased'}\n div_ids = {'hospitalized': 'hospitalized_chart',\n 'icu': 'icu_chart',\n 'deceased': 'deceased_chart'\n }\n titles = {'hospitalized': 'Num. hospitalizaciones por 100.000 hab. (media 7 días)',\n 'icu': 'Num. ingresos UCI por 100.000 hab. (media 7 días)',\n 'deceased': 'Num. fallecidos por 100.000 hab. (media 7 días)'\n }\n\n if False:\n if spa_report:\n rolling_means = ministry_datasources.get_ministry_rolling_mean_spa()\n titles = {'hospitalized': 'Num. hospitalizaciones. (media 7 días)',\n 'icu': 'Num. ingresos UCI. (media 7 días)',\n 'deceased': 'Num. fallecidos. (media 7 días)'\n }\n else:\n rolling_means = ministry_datasources.get_ministry_rolling_mean()\n titles = {'hospitalized': 'Num. hospitalizaciones por 100.000 hab. (media 7 días)',\n 'icu': 'Num. ingresos UCI por 100.000 hab. (media 7 días)',\n 'deceased': 'Num. fallecidos por 100.000 hab. 
(media 7 días)'\n }\n\n div_ids_hospitalized = {'dashboard': 'hospitalized_dashboard',\n 'chart': 'hospitalized_chart',\n 'rangeslider': 'hospitalized_rangeslider'}\n div_ids_deceased = {'dashboard': 'deceased_dashboard',\n 'chart': 'deceased_chart',\n 'rangeslider': 'deceased_rangeslider'}\n div_ids = {'hospitalized': div_ids_hospitalized,\n 'deceased': div_ids_deceased,\n }\n\n if False:\n dframe = rolling_means['hospitalized']\n if spa_report:\n columns = [('date', 'fecha'), ('number', 'España')]\n table = _create_table_for_chart_from_series(dframe)\n else:\n populations = [data_sources.get_population(ccaa) for ccaa in dframe.index]\n dframe = dframe.divide(populations, axis=0) * 1e5\n table, ccaas, _ = _create_table_for_chart_from_dframe(dframe, desired_ccaas)\n columns = [('date', 'fecha')]\n columns.extend([('number', data_sources.convert_to_ccaa_name(ccaa)) for ccaa in ccaas])\n\n key = 'hospitalized'\n hospitalized_slider_config = {'column_controlled': 'fecha',\n 'min_value': dates[0],\n 'max_value': dates[-1],\n 'min_init_value': date_range[0],\n 'max_init_value': datetime.datetime.now()}\n html += material_line_chart.create_chart_js_with_slider(js_function_names[key],\n hospitalized_slider_config,\n div_ids[key],\n title=titles[key],\n columns=columns,\n data_table=table,\n sizes=js_sizes)\n\n num_days = 7\n key = 'deceased'\n deaths_dframe = deaths['dframe']\n if spa_report:\n spa_deaths = deaths_dframe.sum(axis=0)\n deaths_rolling_mean = spa_deaths.rolling(num_days, center=True, min_periods=num_days).mean().dropna()\n table = _create_table_for_chart_from_series(deaths_rolling_mean)\n columns = [('date', 'fecha'), ('number', 'España')]\n else:\n deaths_rolling_mean = deaths_dframe.rolling(num_days, center=True, min_periods=num_days, axis=1).mean()\n deaths_rolling_mean = deaths_rolling_mean.dropna(axis=1, how='all')\n populations = [data_sources.get_population(ccaa) for ccaa in deaths_rolling_mean.index]\n deaths_rolling_mean = deaths_rolling_mean.divide(populations, axis=0) * 1e5\n\n table, ccaas, _ = _create_table_for_chart_from_dframe(deaths_rolling_mean, desired_ccaas)\n columns = [('date', 'fecha')]\n columns.extend([('number', data_sources.convert_to_ccaa_name(ccaa)) for ccaa in ccaas])\n\n html += material_line_chart.create_chart_js_with_slider(js_function_names[key],\n slider_config,\n div_ids[key],\n title=titles[key],\n columns=columns,\n data_table=table,\n sizes=js_sizes)\n\n html += ' </script>\\n </head>\\n <body>\\n'\n today = datetime.datetime.now()\n html += '<p><a href=\"../\">Menu</a></p>'\n html += f'<p>Informe generado el día: {today.day}-{today.month}-{today.year}</p>'\n\n html += f'<p>Este informe está generado para uso personal por <a href=\"https://twitter.com/jblanca42\">@jblanca42</a>, pero lo sube a la web por si le pudiese ser de utilidad a alguien más.</p>'\n html += f'<p>El código utilizado para generarlo se encuentra en <a href=\"https://github.com/JoseBlanca/seguimiento_covid\">github</a>, si encuentras algún fallo o quieres mejorar algo envía un mensaje o haz un pull request.</p>'\n\n if desired_ccaas:\n index = [ccaa for ccaa in deaths['dframe'].index if is_desired_ccaa(ccaa, desired_ccaas)]\n tot_deaths = deaths['dframe'].loc[index, :].values.sum()\n else:\n tot_deaths = deaths['dframe'].values.sum() + deaths['unassinged_deaths']\n html += f'<p>Número total de fallecidos: {tot_deaths}</p>'\n\n if spa_report:\n death_rate = round(sum(data_sources.POPULATION.values()) / tot_deaths)\n html += f'<p>Una de cada {death_rate} personas han 
fallecido.</p>'\n elif desired_ccaas and len(desired_ccaas) == 1:\n death_rate = round(data_sources.get_population(desired_ccaas[0]) / tot_deaths)\n html += f'<p>Una de cada {death_rate} personas han fallecido en esta comunidad autónoma.</p>'\n else:\n deaths_per_ccaa = deaths['dframe'].sum(axis=1)\n populations = [data_sources.get_population(ccaa) for ccaa in deaths_per_ccaa.index]\n populations = pandas.Series(populations, index=deaths_per_ccaa.index)\n death_rate = (populations / deaths_per_ccaa).round().sort_values().astype(int)\n html += '<p>¿Una de cada cuántas personas han fallecido por comunidad autónoma?</p>'\n html += _write_table_from_series(death_rate)\n\n if False:\n for key in ['hospitalized']:\n html += f\"<p>{DESCRIPTIONS[spa_report][key]}</p>\\n\"\n html += material_line_chart.create_chart_with_slider_divs(div_ids[key],\n sizes=div_sizes)\n\n html += f\"<p>{DESCRIPTIONS[spa_report]['incidencia_acumulada']}</p>\\n\"\n\n html += material_line_chart.create_chart_with_slider_divs(div_ids_accumulated_cases,\n sizes=div_sizes)\n for key in ['deceased']:\n html += f\"<p>{DESCRIPTIONS[spa_report][key]}</p>\\n\"\n html += material_line_chart.create_chart_with_slider_divs(div_ids[key],\n sizes=div_sizes)\n\n html += ' </body>\\n</html>'\n\n out_path.open('wt').write(html)\n\n\nif __name__ == '__main__':\n\n ten_days_ago = datetime.datetime.now() - datetime.timedelta(days=10)\n forty_days_ago = datetime.datetime.now() - datetime.timedelta(days=40)\n first_date = datetime.datetime(2020, 9, 1)\n\n out_dir = config.HTML_REPORTS_DIR\n out_dir.mkdir(exist_ok=True)\n out_path = out_dir / 'situacion_covid_por_ca.html'\n write_html_report(out_path, date_range=[forty_days_ago, ten_days_ago])\n",
"step-ids": [
4,
9,
10,
11,
13
]
}
|
[
4,
9,
10,
11,
13
] |
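The report code in the record above builds its 15-day accumulated incidence by looping over dates and summing a boolean mask for each comunidad autónoma, then scaling by population. The same window can be written as a pandas rolling sum; the sketch below uses invented daily counts and an assumed population of 2.5 million, so it only illustrates the arithmetic, not the repository's data loading.

import pandas as pd

# invented daily case counts for one region; the real code reads them from
# the downloaded Carlos III reports and keeps one series per comunidad autónoma
daily_cases = pd.Series(
    range(100, 130),
    index=pd.date_range('2020-09-01', periods=30, freq='D'),
)
population = 2_500_000  # assumed population figure for the region

# cases reported in the previous 15 days, scaled per 100,000 inhabitants;
# min_periods=15 drops the first dates, mirroring the `continue` in the loop
accumulated_incidence = (
    daily_cases.rolling(window=15, min_periods=15).sum() / population * 1e5
)
print(accumulated_incidence.dropna().tail())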
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Player:
<|reserved_special_token_0|>
def pick_up_item(self, item):
if len(self.items) <= 3:
self.items.append(item)
print(
f"""
NOW YOU HAVE THE {item}!
You can drop it at any time by typing 'drop {item}'
"""
)
else:
print("Sorry you'll have to drop something to pick this up.")
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Player:
<|reserved_special_token_0|>
def pick_up_item(self, item):
if len(self.items) <= 3:
self.items.append(item)
print(
f"""
NOW YOU HAVE THE {item}!
You can drop it at any time by typing 'drop {item}'
"""
)
else:
print("Sorry you'll have to drop something to pick this up.")
def drop_item(self, item):
if len(self.items) > 0:
self.items.remove(item)
print(f'YOU HAVE DROPPED THE {item}.')
else:
print("You don't have any items to drop!")
<|reserved_special_token_1|>
class Player:
def __init__(self, name, location, items=[]):
self.name = name
self.location = location
self.items = items
def pick_up_item(self, item):
if len(self.items) <= 3:
self.items.append(item)
print(
f"""
NOW YOU HAVE THE {item}!
You can drop it at any time by typing 'drop {item}'
"""
)
else:
print("Sorry you'll have to drop something to pick this up.")
def drop_item(self, item):
if len(self.items) > 0:
self.items.remove(item)
print(f'YOU HAVE DROPPED THE {item}.')
else:
print("You don't have any items to drop!")
<|reserved_special_token_1|>
# Write a class to hold player information, e.g. what room they are in
# currently.
class Player():
def __init__(self, name, location, items=[]):
self.name = name
self.location = location
self.items = items
# def try_direction(self, user_action):
# attribute = user_action + '_to'
# # see if the current room has an attribute
# # we can use 'hasattr' (has attribute)
# if hasattr(self.location, attribute):
# # can use 'getattr' to move to room
# self.location = getattr(self.location, attribute)
# else:
# print("Nothing to find here!")
def pick_up_item(self, item):
if len(self.items) <= 3:
self.items.append(item)
print(f"""\n\nNOW YOU HAVE THE {item}!
You can drop it at any time by typing 'drop {item}'\n""")
else:
print("Sorry you'll have to drop something to pick this up.")
def drop_item(self, item):
if len(self.items) > 0:
self.items.remove(item)
print(f"YOU HAVE DROPPED THE {item}.")
else:
print("You don't have any items to drop!")
# TODO: add a method for the player to print the items they are carrying
# def print_items
|
flexible
|
{
"blob_id": "b355bd5a519d65ea35d4e8d5e6a384424d79130a",
"index": 3620,
"step-1": "<mask token>\n",
"step-2": "class Player:\n <mask token>\n\n def pick_up_item(self, item):\n if len(self.items) <= 3:\n self.items.append(item)\n print(\n f\"\"\"\n\nNOW YOU HAVE THE {item}!\nYou can drop it at any time by typing 'drop {item}'\n\"\"\"\n )\n else:\n print(\"Sorry you'll have to drop something to pick this up.\")\n <mask token>\n",
"step-3": "class Player:\n <mask token>\n\n def pick_up_item(self, item):\n if len(self.items) <= 3:\n self.items.append(item)\n print(\n f\"\"\"\n\nNOW YOU HAVE THE {item}!\nYou can drop it at any time by typing 'drop {item}'\n\"\"\"\n )\n else:\n print(\"Sorry you'll have to drop something to pick this up.\")\n\n def drop_item(self, item):\n if len(self.items) > 0:\n self.items.remove(item)\n print(f'YOU HAVE DROPPED THE {item}.')\n else:\n print(\"You don't have any items to drop!\")\n",
"step-4": "class Player:\n\n def __init__(self, name, location, items=[]):\n self.name = name\n self.location = location\n self.items = items\n\n def pick_up_item(self, item):\n if len(self.items) <= 3:\n self.items.append(item)\n print(\n f\"\"\"\n\nNOW YOU HAVE THE {item}!\nYou can drop it at any time by typing 'drop {item}'\n\"\"\"\n )\n else:\n print(\"Sorry you'll have to drop something to pick this up.\")\n\n def drop_item(self, item):\n if len(self.items) > 0:\n self.items.remove(item)\n print(f'YOU HAVE DROPPED THE {item}.')\n else:\n print(\"You don't have any items to drop!\")\n",
"step-5": "# Write a class to hold player information, e.g. what room they are in\n# currently.\n\n\nclass Player():\n def __init__(self, name, location, items=[]):\n self.name = name\n self.location = location\n self.items = items\n\n # def try_direction(self, user_action):\n # attribute = user_action + '_to'\n\n # # see if the current room has an attribute\n # # we can use 'hasattr' (has attribute)\n # if hasattr(self.location, attribute):\n # # can use 'getattr' to move to room\n # self.location = getattr(self.location, attribute)\n # else:\n # print(\"Nothing to find here!\")\n\n def pick_up_item(self, item):\n if len(self.items) <= 3:\n self.items.append(item)\n print(f\"\"\"\\n\\nNOW YOU HAVE THE {item}!\nYou can drop it at any time by typing 'drop {item}'\\n\"\"\")\n else:\n print(\"Sorry you'll have to drop something to pick this up.\")\n\n def drop_item(self, item):\n if len(self.items) > 0:\n self.items.remove(item)\n print(f\"YOU HAVE DROPPED THE {item}.\")\n else:\n print(\"You don't have any items to drop!\")\n\n# add for player to print what items they have\n# def print_items\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
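One pitfall in the Player class captured above: the default argument items=[] is evaluated once, when __init__ is defined, so every Player created without an explicit list shares the same inventory object. A minimal sketch of the usual None-default idiom, shown here only as an illustration:

class Player:
    def __init__(self, name, location, items=None):
        self.name = name
        self.location = location
        # give each player a fresh list; with a shared items=[] default,
        # one player's pick_up_item() would also fill every other player's bag
        self.items = [] if items is None else items


# quick check: two players no longer share the same list object
p1 = Player('Ana', 'foyer')
p2 = Player('Luis', 'overlook')
p1.items.append('sword')
assert p2.items == []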
<|reserved_special_token_0|>
def send_ty():
DonorName = 'list'
while DonorName == 'list':
DonorName = input(
'"Provide Donor Full Name, or type: "List" to display a list of all donors => '
)
if DonorName.lower().strip() == 'list':
view_donors()
continue
if DonorName[:1].lower() == 'e':
return None
DonorName = DonorName.strip()
donor_amount = ask_donation_amount(DonorName)
if donor_amount is None:
return None
append_donation(DonorName, donor_amount)
print(ty_letter(DonorName, donor_amount), end='\n\n')
def ty_letter(name, amount):
return f"""
Thank you, {name} for donating ${amount:.2f}"""
def ask_donation_amount(name):
response = input(f'How much did {name} donate? ')
if response[:1].lower() == 'e':
return None
return float(response)
def append_donation(name, amount):
donor_list.setdefault(name, []).append(amount)
<|reserved_special_token_0|>
def report_sort(item):
return item[1]
def create_report():
print()
print('{:<20}| Total Given | Num Gifts | Average Gift'.format('Donor Name')
)
print('-' * 60)
for d, v in sorted(donor_list.items(), key=report_sort, reverse=True):
print('{:<21}${:>11.2f}{:>12} ${:>12.2f}'.format(d, sum(v), len(v),
sum(v) / len(v)))
def exit_program():
print('Program Exited!')
sys.exit()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def send_ty():
DonorName = 'list'
while DonorName == 'list':
DonorName = input(
'"Provide Donor Full Name, or type: "List" to display a list of all donors => '
)
if DonorName.lower().strip() == 'list':
view_donors()
continue
if DonorName[:1].lower() == 'e':
return None
DonorName = DonorName.strip()
donor_amount = ask_donation_amount(DonorName)
if donor_amount is None:
return None
append_donation(DonorName, donor_amount)
print(ty_letter(DonorName, donor_amount), end='\n\n')
def ty_letter(name, amount):
return f"""
Thank you, {name} for donating ${amount:.2f}"""
def ask_donation_amount(name):
response = input(f'How much did {name} donate? ')
if response[:1].lower() == 'e':
return None
return float(response)
def append_donation(name, amount):
donor_list.setdefault(name, []).append(amount)
def view_donors():
for donor in donor_list:
print(f'{donor}')
def report_sort(item):
return item[1]
def create_report():
print()
print('{:<20}| Total Given | Num Gifts | Average Gift'.format('Donor Name')
)
print('-' * 60)
for d, v in sorted(donor_list.items(), key=report_sort, reverse=True):
print('{:<21}${:>11.2f}{:>12} ${:>12.2f}'.format(d, sum(v), len(v),
sum(v) / len(v)))
def exit_program():
print('Program Exited!')
sys.exit()
def main():
menu_dict = {'1': send_ty, '2': create_report, '3': exit_program}
prompt_menu = '\n'.join(('', 'Charity Management Application',
'Please choose from below options:', '', '1 - Send a Thank You',
'2 - Create a Report', '3 - Exit', '>>> '))
while True:
response = input(prompt_menu)
menu_dict[response]()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def send_ty():
DonorName = 'list'
while DonorName == 'list':
DonorName = input(
'"Provide Donor Full Name, or type: "List" to display a list of all donors => '
)
if DonorName.lower().strip() == 'list':
view_donors()
continue
if DonorName[:1].lower() == 'e':
return None
DonorName = DonorName.strip()
donor_amount = ask_donation_amount(DonorName)
if donor_amount is None:
return None
append_donation(DonorName, donor_amount)
print(ty_letter(DonorName, donor_amount), end='\n\n')
def ty_letter(name, amount):
return f"""
Thank you, {name} for donating ${amount:.2f}"""
def ask_donation_amount(name):
response = input(f'How much did {name} donate? ')
if response[:1].lower() == 'e':
return None
return float(response)
def append_donation(name, amount):
donor_list.setdefault(name, []).append(amount)
def view_donors():
for donor in donor_list:
print(f'{donor}')
def report_sort(item):
return item[1]
def create_report():
print()
print('{:<20}| Total Given | Num Gifts | Average Gift'.format('Donor Name')
)
print('-' * 60)
for d, v in sorted(donor_list.items(), key=report_sort, reverse=True):
print('{:<21}${:>11.2f}{:>12} ${:>12.2f}'.format(d, sum(v), len(v),
sum(v) / len(v)))
def exit_program():
print('Program Exited!')
sys.exit()
def main():
menu_dict = {'1': send_ty, '2': create_report, '3': exit_program}
prompt_menu = '\n'.join(('', 'Charity Management Application',
'Please choose from below options:', '', '1 - Send a Thank You',
'2 - Create a Report', '3 - Exit', '>>> '))
while True:
response = input(prompt_menu)
menu_dict[response]()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import os
import sys
import math
donor_list = {'William Gates': [1010, 2020, 3030], 'Mark Zuckerberg': [5500,
4400], 'Jeff Bezos': [6745, 2345, 3845], 'Paul Allen': [9999, 8888, 7777]}
def send_ty():
DonorName = 'list'
while DonorName == 'list':
DonorName = input(
'"Provide Donor Full Name, or type: "List" to display a list of all donors => '
)
if DonorName.lower().strip() == 'list':
view_donors()
continue
if DonorName[:1].lower() == 'e':
return None
DonorName = DonorName.strip()
donor_amount = ask_donation_amount(DonorName)
if donor_amount is None:
return None
append_donation(DonorName, donor_amount)
print(ty_letter(DonorName, donor_amount), end='\n\n')
def ty_letter(name, amount):
return f"""
Thank you, {name} for donating ${amount:.2f}"""
def ask_donation_amount(name):
response = input(f'How much did {name} donate? ')
if response[:1].lower() == 'e':
return None
return float(response)
def append_donation(name, amount):
donor_list.setdefault(name, []).append(amount)
def view_donors():
for donor in donor_list:
print(f'{donor}')
def report_sort(item):
return item[1]
def create_report():
print()
print('{:<20}| Total Given | Num Gifts | Average Gift'.format('Donor Name')
)
print('-' * 60)
for d, v in sorted(donor_list.items(), key=report_sort, reverse=True):
print('{:<21}${:>11.2f}{:>12} ${:>12.2f}'.format(d, sum(v), len(v),
sum(v) / len(v)))
def exit_program():
print('Program Exited!')
sys.exit()
def main():
menu_dict = {'1': send_ty, '2': create_report, '3': exit_program}
prompt_menu = '\n'.join(('', 'Charity Management Application',
'Please choose from below options:', '', '1 - Send a Thank You',
'2 - Create a Report', '3 - Exit', '>>> '))
while True:
response = input(prompt_menu)
menu_dict[response]()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
# ------------------------------------#
# Title: Mailroom Part 1
# Dev: SChang
# Date: Feb 2nd, 2019
# ChangeLog: (Who, When, What)
# SChang, 02/02/2019, Created Script
# ------------------------------------#
import os
import sys
import math
donor_list = {"William Gates": [1010, 2020, 3030],
"Mark Zuckerberg": [5500, 4400],
"Jeff Bezos": [6745, 2345, 3845],
"Paul Allen": [9999, 8888, 7777]
}
# function for adding a new donor or matching an existing one, then recording the gift and printing a thank-you
def send_ty():
DonorName = "list"
while DonorName == "list":
DonorName = input(""""Provide Donor Full Name, or type: "List" to display a list of all donors => """)
if DonorName.lower().strip() == "list":
view_donors()
continue
if DonorName[:1].lower() == "e":
return None
DonorName = DonorName.strip()
donor_amount = ask_donation_amount(DonorName)
if donor_amount is None:
return None
append_donation(DonorName, donor_amount)
print(ty_letter(DonorName, donor_amount), end='\n\n')
# function that builds the thank-you message from the donor name and amount (used by send_ty)
def ty_letter(name, amount):
return f"""
Thank you, {name} for donating ${amount:.2f}"""
# function that asks how much the named donor gave; returns None if the user types 'e' to exit
def ask_donation_amount(name):
response = input(f"How much did {name} donate? ")
    if response[:1].lower() == 'e':
return None
return float(response)
# function appending the donation amount, creating the donor entry first if it is new
def append_donation(name, amount):
donor_list.setdefault(name, []).append(amount)
# viewing the list of donors, shown when "list" is typed at the donor name prompt
def view_donors():
for donor in donor_list:
print(f"{donor}")
def report_sort(item):
return item[1]
# function printing the report: total given, number of gifts and average gift per donor
def create_report():
print()
print("{:<20}| Total Given | Num Gifts | Average Gift".format("Donor Name"))
print("-" * 60)
for d, v in sorted(donor_list.items(), key=report_sort, reverse=True):
print("{:<21}${:>11.2f}{:>12} ${:>12.2f}".format(d, sum(v), len(v),
sum(v) / len(v)))
# function for exit option off menu
def exit_program():
print("Program Exited!")
sys.exit()
def main():
menu_dict = {
"1": send_ty,
"2": create_report,
"3": exit_program
}
prompt_menu = "\n".join(("",
"Charity Management Application",
"Please choose from below options:",
"",
"1 - Send a Thank You",
"2 - Create a Report",
"3 - Exit",
">>> "))
while True:
response = input(prompt_menu)
menu_dict[response]()
if __name__ == "__main__":
# Guards against code running automatically if module is imported
main()
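The main() loop above calls menu_dict[response]() directly, so any choice other than '1', '2' or '3' raises a KeyError. A guarded dispatch could look like the sketch below; the run_menu helper and the placeholder actions are invented for illustration and are not part of the original script.

def run_menu(menu, prompt):
    """Dispatch menu choices through a dict, re-prompting on unknown input."""
    while True:
        choice = input(prompt).strip()
        action = menu.get(choice)  # dict.get avoids the KeyError on bad input
        if action is None:
            print(f'Unrecognized option: {choice!r}. Please choose again.')
            continue
        action()


if __name__ == '__main__':
    # stand-in actions; the real script would pass send_ty, create_report
    # and exit_program here
    run_menu({'1': lambda: print('send a thank you'),
              '2': lambda: print('create a report'),
              '3': exit},
             '\n1 - Send a Thank You\n2 - Create a Report\n3 - Exit\n>>> ')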
from django import forms


class TeacherForm(forms.Form):
    name = forms.CharField(
        label='Your Name',
        max_length=100,
        widget=forms.TextInput(attrs={'class': 'form-control text-center w-75 mx-auto'}),
    )
    email = forms.EmailField(
        widget=forms.TextInput(attrs={'class': 'form-control text-center w-75 mx-auto'}),
    )
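# Illustrative only: one way this form might be wired into a view. The view name,
# redirect target, and template path below are assumptions, not part of the
# original code.
from django.shortcuts import redirect, render


def teacher_signup(request):
    # bind POST data on submit, otherwise render an unbound form
    form = TeacherForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        # form.cleaned_data['name'] and form.cleaned_data['email'] are available here
        # (e.g., to save or to send a confirmation email)
        return redirect('/')
    return render(request, 'teacher_form.html', {'form': form})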
def connect(autocommit=False, attrs_before=None):
return pyodbc.connect(CNXNSTR, autocommit=autocommit, attrs_before=
attrs_before)
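# Illustrative only: the tests below take a `cursor` argument that the full module
# supplies via a pytest fixture, which is elided here. A minimal sketch of such a
# fixture (its exact name, scope, and cleanup in the original are assumptions;
# `pytest` itself is imported in the elided module header):
@pytest.fixture()
def cursor():
    cnxn = connect()
    cur = cnxn.cursor()
    cur.execute("drop table if exists t1")
    cur.execute("drop table if exists t2")
    cnxn.commit()
    yield cur
    cnxn.rollback()
    cnxn.close()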
def test_text(cursor: pyodbc.Cursor):
_test_vartype(cursor, 'text')
def test_nvarchar(cursor: pyodbc.Cursor):
_test_vartype(cursor, 'nvarchar')
def test_varbinary(cursor: pyodbc.Cursor):
_test_vartype(cursor, 'varbinary')
@pytest.mark.skipif(SQLSERVER_YEAR < 2005, reason=
'(max) not supported until 2005')
def test_unicode_longmax(cursor: pyodbc.Cursor):
cursor.execute("select cast(replicate(N'x', 512) as nvarchar(max))")
def test_int(cursor: pyodbc.Cursor):
_test_scalar(cursor, 'int', [None, -1, 0, 1, 12345678])
def test_bigint(cursor: pyodbc.Cursor):
_test_scalar(cursor, 'bigint', [None, -1, 0, 1, 4886718345, 2147483647,
4294967295, 4886718345])
def test_overflow_int(cursor: pyodbc.Cursor):
input = 9999999999999999999999999999999999999
cursor.execute('create table t1(d bigint)')
with pytest.raises(OverflowError):
cursor.execute('insert into t1 values (?)', input)
result = cursor.execute('select * from t1').fetchall()
assert result == []
def test_drivers():
p = pyodbc.drivers()
assert isinstance(p, list)
def test_datasources():
p = pyodbc.dataSources()
assert isinstance(p, dict)
def test_getinfo_bool():
cnxn = connect()
value = cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES)
assert isinstance(value, bool)
def test_getinfo_int():
cnxn = connect()
value = cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION)
assert isinstance(value, int)
def test_getinfo_smallint():
cnxn = connect()
value = cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR)
assert isinstance(value, int)
def test_no_fetch(cursor: pyodbc.Cursor):
cursor.execute('select 1')
cursor.execute('select 1')
cursor.execute('select 1')
def test_decode_meta(cursor: pyodbc.Cursor):
"""
Ensure column names with non-ASCII characters are converted using the configured encodings.
"""
cursor.execute('create table t1(a int)')
cursor.execute('insert into t1 values (1)')
cursor.execute('select a as "Tipología" from t1')
assert cursor.description[0][0] == 'Tipología'
def test_exc_integrity(cursor: pyodbc.Cursor):
"""Make sure an IntegretyError is raised"""
cursor.execute('create table t1(s1 varchar(10) primary key)')
cursor.execute("insert into t1 values ('one')")
with pytest.raises(pyodbc.IntegrityError):
cursor.execute("insert into t1 values ('one')")
def test_multiple_bindings(cursor: pyodbc.Cursor):
"""More than one bind and select on a cursor"""
cursor.execute('create table t1(n int)')
cursor.execute('insert into t1 values (?)', 1)
cursor.execute('insert into t1 values (?)', 2)
cursor.execute('insert into t1 values (?)', 3)
for _ in range(3):
cursor.execute('select n from t1 where n < ?', 10)
cursor.execute('select n from t1 where n < 3')
def _test_vartype(cursor: pyodbc.Cursor, datatype):
if datatype == 'text':
lengths = LARGE_FENCEPOST_SIZES
else:
lengths = SMALL_FENCEPOST_SIZES
if datatype == 'text':
cursor.execute(f'create table t1(c1 {datatype})')
else:
maxlen = lengths[-1]
cursor.execute(f'create table t1(c1 {datatype}({maxlen}))')
for length in lengths:
cursor.execute('delete from t1')
encoding = datatype in ('blob', 'varbinary') and 'utf8' or None
value = _generate_str(length, encoding=encoding)
try:
cursor.execute('insert into t1 values(?)', value)
except pyodbc.Error as ex:
msg = f'{datatype} insert failed: length={length} len={len(value)}'
raise Exception(msg) from ex
v = cursor.execute('select * from t1').fetchone()[0]
assert v == value
def test_noscan(cursor: pyodbc.Cursor):
assert cursor.noscan is False
cursor.noscan = True
assert cursor.noscan is True
def test_native_uuid(cursor: pyodbc.Cursor):
value = uuid.uuid4()
cursor.execute('create table t1(n uniqueidentifier)')
cursor.execute('insert into t1 values (?)', value)
pyodbc.native_uuid = True
result = cursor.execute('select n from t1').fetchval()
assert isinstance(result, uuid.UUID)
assert value == result
@pytest.mark.skipif(IS_FREEDTS, reason=
'https://github.com/FreeTDS/freetds/issues/230')
def test_nextset_with_raiserror(cursor: pyodbc.Cursor):
cursor.execute("select i = 1; RAISERROR('c', 16, 1);")
row = next(cursor)
assert 1 == row.i
with pytest.raises(pyodbc.ProgrammingError):
cursor.nextset()
def test_bit(cursor: pyodbc.Cursor):
value = True
cursor.execute('create table t1(b bit)')
cursor.execute('insert into t1 values (?)', value)
v = cursor.execute('select b from t1').fetchone()[0]
assert isinstance(v, bool)
assert v == value
def test_decimal(cursor: pyodbc.Cursor):
for precision, scale, negative in [(1, 0, False), (1, 0, True), (6, 0,
False), (6, 2, False), (6, 4, True), (6, 6, True), (38, 0, False),
(38, 10, False), (38, 38, False), (38, 0, True), (38, 10, True), (
38, 38, True)]:
try:
cursor.execute('drop table t1')
except:
pass
cursor.execute(f'create table t1(d decimal({precision}, {scale}))')
sign = negative and '-' or ''
before = '9' * (precision - scale)
after = scale and '.' + '9' * scale or ''
decStr = f'{sign}{before}{after}'
value = Decimal(decStr)
cursor.execute('insert into t1 values(?)', value)
v = cursor.execute('select d from t1').fetchone()[0]
assert v == value
def test_decimal_e(cursor: pyodbc.Cursor):
"""Ensure exponential notation decimals are properly handled"""
value = Decimal((0, (1, 2, 3), 5))
cursor.execute('create table t1(d decimal(10, 2))')
cursor.execute('insert into t1 values (?)', value)
result = cursor.execute('select * from t1').fetchone()[0]
assert result == value
def test_close_cnxn():
"""Make sure using a Cursor after closing its connection doesn't crash."""
cnxn = connect()
cursor = cnxn.cursor()
cursor.execute('drop table if exists t1')
cursor.execute('create table t1(id integer, s varchar(20))')
cursor.execute('insert into t1 values (?,?)', 1, 'test')
cursor.execute('select * from t1')
cnxn.close()
with pytest.raises(pyodbc.ProgrammingError):
cursor.execute('select * from t1')
def test_empty_string(cursor: pyodbc.Cursor):
cursor.execute('create table t1(s varchar(20))')
cursor.execute('insert into t1 values(?)', '')
def test_empty_string_encoding():
cnxn = connect()
cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='shift_jis')
value = ''
cursor = cnxn.cursor()
cursor.execute('create table t1(s varchar(20))')
cursor.execute('insert into t1 values(?)', value)
v = cursor.execute('select * from t1').fetchone()[0]
assert v == value
def test_negative_row_index(cursor: pyodbc.Cursor):
cursor.execute('create table t1(s varchar(20))')
cursor.execute('insert into t1 values(?)', '1')
row = cursor.execute('select * from t1').fetchone()
assert row[0] == '1'
assert row[-1] == '1'
def test_version():
assert 3 == len(pyodbc.version.split('.'))
@pytest.mark.skipif(IS_MSODBCSQL and SQLSERVER_YEAR < 2008, reason=
'Date not supported until 2008?')
def test_date(cursor: pyodbc.Cursor):
value = date.today()
cursor.execute('create table t1(d date)')
cursor.execute('insert into t1 values (?)', value)
result = cursor.execute('select d from t1').fetchone()[0]
assert isinstance(result, date)
assert value == result
def test_datetime_fraction_rounded(cursor: pyodbc.Cursor):
full = datetime(2007, 1, 15, 3, 4, 5, 123456)
rounded = datetime(2007, 1, 15, 3, 4, 5, 123000)
cursor.execute('create table t1(dt datetime)')
cursor.execute('insert into t1 values (?)', full)
result = cursor.execute('select dt from t1').fetchone()[0]
assert isinstance(result, datetime)
assert rounded == result
def test_datetime2(cursor: pyodbc.Cursor):
value = datetime(2007, 1, 15, 3, 4, 5)
cursor.execute('create table t1(dt datetime2)')
cursor.execute('insert into t1 values (?)', value)
result = cursor.execute('select dt from t1').fetchone()[0]
assert isinstance(result, datetime)
assert value == result
def test_sp_results(cursor: pyodbc.Cursor):
cursor.execute(
"""
Create procedure proc1
AS
select top 10 name, id, xtype, refdate
from sysobjects
"""
)
rows = cursor.execute('exec proc1').fetchall()
assert isinstance(rows, list)
assert len(rows) == 10
assert isinstance(rows[0].refdate, datetime)
def test_sp_results_from_temp(cursor: pyodbc.Cursor):
cursor.execute(
"""
Create procedure proc1
AS
set nocount on
select top 10 name, id, xtype, refdate
into #tmptable
from sysobjects
select * from #tmptable
"""
)
cursor.execute('exec proc1')
assert cursor.description is not None
assert len(cursor.description) == 4
rows = cursor.fetchall()
assert isinstance(rows, list)
assert len(rows) == 10
assert isinstance(rows[0].refdate, datetime)
def test_sp_with_dates(cursor: pyodbc.Cursor):
cursor.execute(
"""
if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]')
and OBJECTPROPERTY(id, N'IsProcedure') = 1)
drop procedure [dbo].[test_sp]
"""
)
cursor.execute(
"""
create procedure test_sp(@d1 datetime, @d2 datetime)
AS
declare @d as int
set @d = datediff(year, @d1, @d2)
select @d
"""
)
cursor.execute('exec test_sp ?, ?', datetime.now(), datetime.now())
rows = cursor.fetchall()
assert rows is not None
assert rows[0][0] == 0
def test_rowcount_delete(cursor: pyodbc.Cursor):
assert cursor.rowcount == -1
cursor.execute('create table t1(i int)')
count = 4
for i in range(count):
cursor.execute('insert into t1 values (?)', i)
cursor.execute('delete from t1')
assert cursor.rowcount == count
def test_rowcount_select(cursor: pyodbc.Cursor):
"""
Ensure Cursor.rowcount is set properly after a select statement.
pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount, but SQL Server 2005
returns -1 after a select statement, so we'll test for that behavior. This is valid
behavior according to the DB API specification, but people don't seem to like it.
"""
cursor.execute('create table t1(i int)')
count = 4
for i in range(count):
cursor.execute('insert into t1 values (?)', i)
cursor.execute('select * from t1')
assert cursor.rowcount == -1
rows = cursor.fetchall()
assert len(rows) == count
assert cursor.rowcount == -1
def test_retcursor_delete(cursor: pyodbc.Cursor):
cursor.execute('create table t1(i int)')
cursor.execute('insert into t1 values (1)')
v = cursor.execute('delete from t1')
assert v == cursor
def test_retcursor_nodata(cursor: pyodbc.Cursor):
"""
This represents a different code path than a delete that deleted something.
The return value is SQL_NO_DATA and code after it was causing an error. We could use
SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount
code.
"""
cursor.execute('create table t1(i int)')
v = cursor.execute('delete from t1')
assert v == cursor
def table_with_spaces(cursor: pyodbc.Cursor):
"""Ensure we can select using [x z] syntax"""
try:
cursor.execute('create table [test one](int n)')
cursor.execute('insert into [test one] values(1)')
cursor.execute('select * from [test one]')
v = cursor.fetchone()[0]
assert v == 1
finally:
cursor.rollback()
def test_row_description(cursor: pyodbc.Cursor):
"""
Ensure Cursor.description is accessible as Row.cursor_description.
"""
cursor.execute('create table t1(a int, b char(3))')
cursor.execute("insert into t1 values(1, 'abc')")
row = cursor.execute('select * from t1').fetchone()
assert cursor.description == row.cursor_description
def test_temp_select(cursor: pyodbc.Cursor):
cursor.execute('create table t1(s char(7))')
cursor.execute('insert into t1 values(?)', 'testing')
v = cursor.execute('select * from t1').fetchone()[0]
assert isinstance(v, str)
assert v == 'testing'
cursor.execute('select s into t2 from t1')
v = cursor.execute('select * from t1').fetchone()[0]
assert isinstance(v, str)
assert v == 'testing'
def test_executemany(cursor: pyodbc.Cursor):
cursor.execute('create table t1(a int, b varchar(10))')
params = [(i, str(i)) for i in range(1, 6)]
cursor.executemany('insert into t1(a, b) values (?,?)', params)
count = cursor.execute('select count(*) from t1').fetchone()[0]
assert count == len(params)
cursor.execute('select a, b from t1 order by a')
rows = cursor.fetchall()
assert count == len(rows)
for param, row in zip(params, rows):
assert param[0] == row[0]
assert param[1] == row[1]
def test_executemany_dae_0(cursor: pyodbc.Cursor):
"""
DAE for 0-length value
"""
cursor.execute('create table t1(a nvarchar(max))')
cursor.fast_executemany = True
cursor.executemany('insert into t1(a) values(?)', [['']])
assert cursor.execute('select a from t1').fetchone()[0] == ''
cursor.fast_executemany = False
def test_row_slicing(cursor: pyodbc.Cursor):
cursor.execute('create table t1(a int, b int, c int, d int)')
cursor.execute('insert into t1 values(1,2,3,4)')
row = cursor.execute('select * from t1').fetchone()
result = row[:]
assert result is row
result = row[:-1]
assert result == (1, 2, 3)
result = row[0:4]
assert result is row
def test_row_repr(cursor: pyodbc.Cursor):
cursor.execute('create table t1(a int, b int, c int, d varchar(50))')
cursor.execute("insert into t1 values(1,2,3,'four')")
row = cursor.execute('select * from t1').fetchone()
result = str(row)
assert result == "(1, 2, 3, 'four')"
result = str(row[:-1])
assert result == '(1, 2, 3)'
result = str(row[:1])
assert result == '(1,)'
def test_concatenation(cursor: pyodbc.Cursor):
v2 = '0123456789' * 30
v3 = '9876543210' * 30
cursor.execute(
'create table t1(c1 int identity(1, 1), c2 varchar(300), c3 varchar(300))'
)
cursor.execute('insert into t1(c2, c3) values (?,?)', v2, v3)
row = cursor.execute('select c2, c3, c2 + c3 as both from t1').fetchone()
assert row.both == v2 + v3
def test_autocommit():
cnxn = connect()
assert cnxn.autocommit is False
cnxn = None
cnxn = connect(autocommit=True)
assert cnxn.autocommit is True
cnxn.autocommit = False
assert cnxn.autocommit is False
def test_sqlserver_callproc(cursor: pyodbc.Cursor):
try:
cursor.execute('drop procedure pyodbctest')
cursor.commit()
except:
pass
cursor.execute('create table t1(s varchar(10))')
cursor.execute('insert into t1 values(?)', 'testing')
cursor.execute(
"""
create procedure pyodbctest @var1 varchar(32)
as
begin
select s from t1
return
end
"""
)
cursor.execute("exec pyodbctest 'hi'")
def test_skip(cursor: pyodbc.Cursor):
cursor.execute('create table t1(id int)')
for i in range(1, 5):
cursor.execute('insert into t1 values(?)', i)
cursor.execute('select id from t1 order by id')
assert cursor.fetchone()[0] == 1
cursor.skip(2)
assert cursor.fetchone()[0] == 4
def test_timeout():
cnxn = connect()
assert cnxn.timeout == 0
cnxn.timeout = 30
assert cnxn.timeout == 30
cnxn.timeout = 0
assert cnxn.timeout == 0
def test_sets_execute(cursor: pyodbc.Cursor):
cursor.execute('create table t1 (word varchar (100))')
words = {'a', 'b', 'c'}
with pytest.raises(pyodbc.ProgrammingError):
cursor.execute('insert into t1 (word) values (?)', words)
with pytest.raises(pyodbc.ProgrammingError):
cursor.executemany('insert into t1 (word) values (?)', words)
def test_row_execute(cursor: pyodbc.Cursor):
"""Ensure we can use a Row object as a parameter to execute"""
cursor.execute('create table t1(n int, s varchar(10))')
cursor.execute("insert into t1 values (1, 'a')")
row = cursor.execute('select n, s from t1').fetchone()
assert row
cursor.execute('create table t2(n int, s varchar(10))')
cursor.execute('insert into t2 values (?, ?)', row)
def test_row_executemany(cursor: pyodbc.Cursor):
"""Ensure we can use a Row object as a parameter to executemany"""
cursor.execute('create table t1(n int, s varchar(10))')
for i in range(3):
cursor.execute('insert into t1 values (?, ?)', i, chr(ord('a') + i))
rows = cursor.execute('select n, s from t1').fetchall()
assert len(rows) != 0
cursor.execute('create table t2(n int, s varchar(10))')
cursor.executemany('insert into t2 values (?, ?)', rows)
def test_description(cursor: pyodbc.Cursor):
"""Ensure cursor.description is correct"""
cursor.execute('create table t1(n int, s varchar(8), d decimal(5,2))')
cursor.execute("insert into t1 values (1, 'abc', '1.23')")
cursor.execute('select * from t1')
t = cursor.description[0]
assert t[0] == 'n'
assert t[1] == int
assert t[5] == 0
assert t[6] is True
t = cursor.description[1]
assert t[0] == 's'
assert t[1] == str
assert t[4] == 8
assert t[5] == 0
assert t[6] is True
t = cursor.description[2]
assert t[0] == 'd'
assert t[1] == Decimal
assert t[4] == 5
assert t[5] == 2
assert t[6] is True
def test_cursor_messages_with_stored_proc(cursor: pyodbc.Cursor):
"""
Complex scenario to test the Cursor.messages attribute.
"""
cursor.execute(
"""
create or alter procedure test_cursor_messages as
begin
set nocount on;
print 'Message 1a';
print 'Message 1b';
select N'Field 1a' AS F UNION ALL SELECT N'Field 1b';
select N'Field 2a' AS F UNION ALL SELECT N'Field 2b';
print 'Message 2a';
print 'Message 2b';
end
"""
)
cursor.execute('exec test_cursor_messages')
vals = [row[0] for row in cursor.fetchall()]
assert vals == ['Field 1a', 'Field 1b']
    msgs = [re.search('Message \\d[ab]$', m[1]).group(0) for m in cursor.messages]
assert msgs == ['Message 1a', 'Message 1b']
assert cursor.nextset()
vals = [row[0] for row in cursor.fetchall()]
assert vals == ['Field 2a', 'Field 2b']
assert not cursor.messages
assert cursor.nextset()
with pytest.raises(pyodbc.ProgrammingError):
cursor.fetchall()
    msgs = [re.search('Message \\d[ab]$', m[1]).group(0) for m in cursor.messages]
assert msgs == ['Message 2a', 'Message 2b']
assert not cursor.nextset()
with pytest.raises(pyodbc.ProgrammingError):
cursor.fetchall()
assert not cursor.messages
def test_none_param(cursor: pyodbc.Cursor):
"""Ensure None can be used for params other than the first"""
cursor.execute('create table t1(n int, blob varbinary(max))')
cursor.execute('insert into t1 values (1, newid())')
row = cursor.execute('select * from t1').fetchone()
assert row.n == 1
assert isinstance(row.blob, bytes)
sql = 'update t1 set n=?, blob=?'
try:
cursor.execute(sql, 2, None)
except pyodbc.DataError:
if IS_FREEDTS:
cursor.setinputsizes([(), (pyodbc.SQL_VARBINARY, None, None)])
cursor.execute(sql, 2, None)
else:
raise
row = cursor.execute('select * from t1').fetchone()
assert row.n == 2
assert row.blob is None
def test_output_conversion():
def convert1(value):
return 'X' + value.decode('latin1') + 'X'
def convert2(value):
return 'Y' + value.decode('latin1') + 'Y'
cnxn = connect()
cursor = cnxn.cursor()
cursor.execute('create table t1(n int, v varchar(10))')
cursor.execute("insert into t1 values (1, '123.45')")
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)
value = cursor.execute('select v from t1').fetchone()[0]
assert value == 'X123.45X'
cnxn.clear_output_converters()
value = cursor.execute('select v from t1').fetchone()[0]
assert value == '123.45'
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)
value = cursor.execute('select v from t1').fetchone()[0]
assert value == 'X123.45X'
cnxn.remove_output_converter(pyodbc.SQL_VARCHAR)
value = cursor.execute('select v from t1').fetchone()[0]
assert value == '123.45'
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)
value = cursor.execute('select v from t1').fetchone()[0]
assert value == 'X123.45X'
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, None)
value = cursor.execute('select v from t1').fetchone()[0]
assert value == '123.45'
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)
value = cursor.execute('select v from t1').fetchone()[0]
assert value == 'X123.45X'
prev_converter = cnxn.get_output_converter(pyodbc.SQL_VARCHAR)
assert prev_converter is not None
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert2)
value = cursor.execute('select v from t1').fetchone()[0]
assert value == 'Y123.45Y'
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, prev_converter)
value = cursor.execute('select v from t1').fetchone()[0]
assert value == 'X123.45X'
cnxn.clear_output_converters()
value = cursor.execute('select v from t1').fetchone()[0]
assert value == '123.45'
prev_converter = cnxn.get_output_converter(pyodbc.SQL_VARCHAR)
assert prev_converter is None
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert2)
value = cursor.execute('select v from t1').fetchone()[0]
assert value == 'Y123.45Y'
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, prev_converter)
value = cursor.execute('select v from t1').fetchone()[0]
assert value == '123.45'
def test_too_large(cursor: pyodbc.Cursor):
"""Ensure error raised if insert fails due to truncation"""
value = 'x' * 1000
cursor.execute('create table t1(s varchar(800))')
with pytest.raises(pyodbc.Error):
cursor.execute('insert into t1 values (?)', value)
def test_context_manager_success():
"""Ensure `with` commits if an exception is not raised"""
cnxn = connect()
cursor = cnxn.cursor()
cursor.execute('create table t1(n int)')
cnxn.commit()
with cnxn:
cursor.execute('insert into t1 values (1)')
rows = cursor.execute('select n from t1').fetchall()
assert len(rows) == 1
assert rows[0][0] == 1
def test_context_manager_failure(cursor: pyodbc.Cursor):
"""Ensure `with` rolls back if an exception is raised"""
cnxn = connect()
cursor = cnxn.cursor()
cursor.execute('create table t1(n int)')
cursor.execute('insert into t1 values (1)')
cnxn.commit()
with pytest.raises(pyodbc.Error):
with cnxn:
cursor.execute('insert into t1 values (2)')
cursor.execute('delete from bogus')
cursor.execute('select max(n) from t1')
val = cursor.fetchval()
assert val == 1
def test_untyped_none(cursor: pyodbc.Cursor):
value = cursor.execute('select ?', None).fetchone()[0]
assert value is None
def test_large_update_nodata(cursor: pyodbc.Cursor):
cursor.execute('create table t1(a varbinary(max))')
hundredkb = b'x' * 100 * 1024
cursor.execute('update t1 set a=? where 1=0', (hundredkb,))
def test_func_param(cursor: pyodbc.Cursor):
try:
cursor.execute('drop function func1')
except:
pass
cursor.execute(
"""
create function func1 (@testparam varchar(4))
returns @rettest table (param varchar(4))
as
begin
insert @rettest
select @testparam
return
end
"""
)
cursor.commit()
value = cursor.execute('select * from func1(?)', 'test').fetchone()[0]
assert value == 'test'
def test_columns(cursor: pyodbc.Cursor):
cursor.execute('create table t1(a int, b varchar(3), xΏz varchar(4))')
cursor.columns('t1')
results = {row.column_name: row for row in cursor}
row = results['a']
assert row.type_name == 'int', row.type_name
row = results['b']
assert row.type_name == 'varchar'
assert row.column_size == 3
cursor.columns('t1', schema=None, catalog=None)
results = {row.column_name: row for row in cursor}
row = results['a']
assert row.type_name == 'int', row.type_name
row = results['b']
assert row.type_name == 'varchar'
assert row.column_size == 3
row = results['xΏz']
assert row.type_name == 'varchar'
assert row.column_size == 4, row.column_size
for i in range(8, 16):
table_name = 'pyodbc_89abcdef'[:i]
cursor.execute(
f"""
IF OBJECT_ID (N'{table_name}', N'U') IS NOT NULL DROP TABLE {table_name};
CREATE TABLE {table_name} (id INT PRIMARY KEY);
"""
)
        col_count = len([col.column_name for col in cursor.columns(table_name)])
assert col_count == 1
cursor.execute(f'drop table {table_name}')
def test_emoticons_as_parameter(cursor: pyodbc.Cursor):
v = 'x 🌜 z'
cursor.execute('create table t1(s nvarchar(100))')
cursor.execute('insert into t1 values (?)', v)
result = cursor.execute('select s from t1').fetchone()[0]
assert result == v
def test_emoticons_as_literal(cursor: pyodbc.Cursor):
v = 'x 🌜 z'
cursor.execute('create table t1(s nvarchar(100))')
cursor.execute(f"insert into t1 values (N'{v}')")
result = cursor.execute('select s from t1').fetchone()[0]
assert result == v
@pytest.mark.skipif(IS_FREEDTS, reason='FreeTDS does not support TVP')
def test_tvp_diffschema(cursor: pyodbc.Cursor):
_test_tvp(cursor, True)
def get_sqlserver_version(cursor: pyodbc.Cursor):
"""
Returns the major version: 8-->2000, 9-->2005, 10-->2008
"""
cursor.execute("exec master..xp_msver 'ProductVersion'")
row = cursor.fetchone()
return int(row.Character_Value.split('.', 1)[0])
@lru_cache()
def _generate_str(length, encoding=None):
"""
Returns either a string or bytes, depending on whether encoding is provided,
that is `length` elements long.
If length is None, None is returned. This simplifies the tests by letting us put None into
an array of other lengths and pass them here, moving the special case check into one place.
"""
if length is None:
return None
v = 'á'
remaining = max(0, length - len(v))
if remaining:
seed = '0123456789-abcdefghijklmnopqrstuvwxyz-'
if remaining <= len(seed):
v += seed
else:
            c = (remaining + len(seed) - 1) // len(seed)
v += seed * c
if encoding:
v = v.encode(encoding)
v = v[:length]
return v
def connect(autocommit=False, attrs_before=None):
    return pyodbc.connect(CNXNSTR, autocommit=autocommit, attrs_before=attrs_before)
@pytest.fixture()
def cursor() ->Iterator[pyodbc.Cursor]:
cnxn = connect()
cur = cnxn.cursor()
cur.execute('drop table if exists t1')
cur.execute('drop table if exists t2')
cur.execute('drop table if exists t3')
cnxn.commit()
yield cur
if not cnxn.closed:
cur.close()
cnxn.close()
def test_text(cursor: pyodbc.Cursor):
_test_vartype(cursor, 'text')
def test_varchar(cursor: pyodbc.Cursor):
_test_vartype(cursor, 'varchar')
def test_nvarchar(cursor: pyodbc.Cursor):
_test_vartype(cursor, 'nvarchar')
def test_varbinary(cursor: pyodbc.Cursor):
_test_vartype(cursor, 'varbinary')
@pytest.mark.skipif(SQLSERVER_YEAR < 2005, reason='(max) not supported until 2005')
def test_unicode_longmax(cursor: pyodbc.Cursor):
cursor.execute("select cast(replicate(N'x', 512) as nvarchar(max))")
def test_char(cursor: pyodbc.Cursor):
value = 'testing'
cursor.execute('create table t1(s char(7))')
cursor.execute('insert into t1 values(?)', 'testing')
v = cursor.execute('select * from t1').fetchone()[0]
assert v == value
def test_int(cursor: pyodbc.Cursor):
_test_scalar(cursor, 'int', [None, -1, 0, 1, 12345678])
def test_bigint(cursor: pyodbc.Cursor):
_test_scalar(cursor, 'bigint', [None, -1, 0, 1, 4886718345, 2147483647,
4294967295, 4886718345])
def test_overflow_int(cursor: pyodbc.Cursor):
input = 9999999999999999999999999999999999999
cursor.execute('create table t1(d bigint)')
with pytest.raises(OverflowError):
cursor.execute('insert into t1 values (?)', input)
result = cursor.execute('select * from t1').fetchall()
assert result == []
def test_float(cursor: pyodbc.Cursor):
_test_scalar(cursor, 'float', [None, -200, -1, 0, 1, 1234.5, -200,
0.00012345])
def test_non_numeric_float(cursor: pyodbc.Cursor):
cursor.execute('create table t1(d float)')
for input in (float('+Infinity'), float('-Infinity'), float('NaN')):
with pytest.raises(pyodbc.ProgrammingError):
cursor.execute('insert into t1 values (?)', input)
def test_drivers():
p = pyodbc.drivers()
assert isinstance(p, list)
def test_datasources():
p = pyodbc.dataSources()
assert isinstance(p, dict)
def test_getinfo_string():
cnxn = connect()
value = cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR)
assert isinstance(value, str)
def test_getinfo_bool():
cnxn = connect()
value = cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES)
assert isinstance(value, bool)
def test_getinfo_int():
cnxn = connect()
value = cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION)
assert isinstance(value, int)
def test_getinfo_smallint():
cnxn = connect()
value = cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR)
assert isinstance(value, int)
def test_no_fetch(cursor: pyodbc.Cursor):
cursor.execute('select 1')
cursor.execute('select 1')
cursor.execute('select 1')
def test_decode_meta(cursor: pyodbc.Cursor):
"""
Ensure column names with non-ASCII characters are converted using the configured encodings.
"""
cursor.execute('create table t1(a int)')
cursor.execute('insert into t1 values (1)')
cursor.execute('select a as "Tipología" from t1')
assert cursor.description[0][0] == 'Tipología'
def test_exc_integrity(cursor: pyodbc.Cursor):
"""Make sure an IntegretyError is raised"""
cursor.execute('create table t1(s1 varchar(10) primary key)')
cursor.execute("insert into t1 values ('one')")
with pytest.raises(pyodbc.IntegrityError):
cursor.execute("insert into t1 values ('one')")
def test_multiple_bindings(cursor: pyodbc.Cursor):
"""More than one bind and select on a cursor"""
cursor.execute('create table t1(n int)')
cursor.execute('insert into t1 values (?)', 1)
cursor.execute('insert into t1 values (?)', 2)
cursor.execute('insert into t1 values (?)', 3)
for _ in range(3):
cursor.execute('select n from t1 where n < ?', 10)
cursor.execute('select n from t1 where n < 3')
def test_different_bindings(cursor: pyodbc.Cursor):
cursor.execute('create table t1(n int)')
cursor.execute('create table t2(d datetime)')
cursor.execute('insert into t1 values (?)', 1)
cursor.execute('insert into t2 values (?)', datetime.now())
def _test_vartype(cursor: pyodbc.Cursor, datatype):
if datatype == 'text':
lengths = LARGE_FENCEPOST_SIZES
else:
lengths = SMALL_FENCEPOST_SIZES
if datatype == 'text':
cursor.execute(f'create table t1(c1 {datatype})')
else:
maxlen = lengths[-1]
cursor.execute(f'create table t1(c1 {datatype}({maxlen}))')
for length in lengths:
cursor.execute('delete from t1')
encoding = datatype in ('blob', 'varbinary') and 'utf8' or None
value = _generate_str(length, encoding=encoding)
try:
cursor.execute('insert into t1 values(?)', value)
except pyodbc.Error as ex:
msg = f'{datatype} insert failed: length={length} len={len(value)}'
raise Exception(msg) from ex
v = cursor.execute('select * from t1').fetchone()[0]
assert v == value
def _test_scalar(cursor: pyodbc.Cursor, datatype, values):
"""
A simple test wrapper for types that are identical when written and read.
"""
cursor.execute(f'create table t1(c1 {datatype})')
for value in values:
cursor.execute('delete from t1')
cursor.execute('insert into t1 values (?)', value)
v = cursor.execute('select c1 from t1').fetchone()[0]
assert v == value
def test_noscan(cursor: pyodbc.Cursor):
assert cursor.noscan is False
cursor.noscan = True
assert cursor.noscan is True
def test_nonnative_uuid(cursor: pyodbc.Cursor):
value = uuid.uuid4()
cursor.execute('create table t1(n uniqueidentifier)')
cursor.execute('insert into t1 values (?)', value)
pyodbc.native_uuid = False
result = cursor.execute('select n from t1').fetchval()
assert isinstance(result, str)
assert result == str(value).upper()
pyodbc.native_uuid = True
def test_native_uuid(cursor: pyodbc.Cursor):
value = uuid.uuid4()
cursor.execute('create table t1(n uniqueidentifier)')
cursor.execute('insert into t1 values (?)', value)
pyodbc.native_uuid = True
result = cursor.execute('select n from t1').fetchval()
assert isinstance(result, uuid.UUID)
assert value == result
@pytest.mark.skipif(IS_FREEDTS, reason='https://github.com/FreeTDS/freetds/issues/230')
def test_nextset_with_raiserror(cursor: pyodbc.Cursor):
cursor.execute("select i = 1; RAISERROR('c', 16, 1);")
row = next(cursor)
assert 1 == row.i
with pytest.raises(pyodbc.ProgrammingError):
cursor.nextset()
def test_fixed_unicode(cursor: pyodbc.Cursor):
value = 'tësting'
cursor.execute('create table t1(s nchar(7))')
cursor.execute('insert into t1 values(?)', 'tësting')
v = cursor.execute('select * from t1').fetchone()[0]
assert isinstance(v, str)
assert len(v) == len(value)
assert v == value
def test_chinese(cursor: pyodbc.Cursor):
v = '我的'
cursor.execute("SELECT N'我的' AS [Name]")
row = cursor.fetchone()
assert row[0] == v
cursor.execute("SELECT N'我的' AS [Name]")
rows = cursor.fetchall()
assert rows[0][0] == v
def test_bit(cursor: pyodbc.Cursor):
value = True
cursor.execute('create table t1(b bit)')
cursor.execute('insert into t1 values (?)', value)
v = cursor.execute('select b from t1').fetchone()[0]
assert isinstance(v, bool)
assert v == value
def test_decimal(cursor: pyodbc.Cursor):
    for precision, scale, negative in [(1, 0, False), (1, 0, True), (6, 0, False),
                                       (6, 2, False), (6, 4, True), (6, 6, True),
                                       (38, 0, False), (38, 10, False), (38, 38, False),
                                       (38, 0, True), (38, 10, True), (38, 38, True)]:
try:
cursor.execute('drop table t1')
except:
pass
cursor.execute(f'create table t1(d decimal({precision}, {scale}))')
sign = negative and '-' or ''
before = '9' * (precision - scale)
after = scale and '.' + '9' * scale or ''
decStr = f'{sign}{before}{after}'
value = Decimal(decStr)
cursor.execute('insert into t1 values(?)', value)
v = cursor.execute('select d from t1').fetchone()[0]
assert v == value
def test_decimal_e(cursor: pyodbc.Cursor):
"""Ensure exponential notation decimals are properly handled"""
value = Decimal((0, (1, 2, 3), 5))
cursor.execute('create table t1(d decimal(10, 2))')
cursor.execute('insert into t1 values (?)', value)
result = cursor.execute('select * from t1').fetchone()[0]
assert result == value
def test_close_cnxn():
"""Make sure using a Cursor after closing its connection doesn't crash."""
cnxn = connect()
cursor = cnxn.cursor()
cursor.execute('drop table if exists t1')
cursor.execute('create table t1(id integer, s varchar(20))')
cursor.execute('insert into t1 values (?,?)', 1, 'test')
cursor.execute('select * from t1')
cnxn.close()
with pytest.raises(pyodbc.ProgrammingError):
cursor.execute('select * from t1')
def test_empty_string(cursor: pyodbc.Cursor):
cursor.execute('create table t1(s varchar(20))')
cursor.execute('insert into t1 values(?)', '')
def test_empty_string_encoding():
cnxn = connect()
cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='shift_jis')
value = ''
cursor = cnxn.cursor()
cursor.execute('create table t1(s varchar(20))')
cursor.execute('insert into t1 values(?)', value)
v = cursor.execute('select * from t1').fetchone()[0]
assert v == value
def test_fixed_str(cursor: pyodbc.Cursor):
value = 'testing'
cursor.execute('create table t1(s char(7))')
cursor.execute('insert into t1 values(?)', value)
v = cursor.execute('select * from t1').fetchone()[0]
assert isinstance(v, str)
assert len(v) == len(value)
assert v == value
def test_empty_unicode_encoding():
cnxn = connect()
cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='shift_jis')
value = ''
cursor = cnxn.cursor()
cursor.execute('create table t1(s nvarchar(20))')
cursor.execute('insert into t1 values(?)', value)
v = cursor.execute('select * from t1').fetchone()[0]
assert v == value
def test_negative_row_index(cursor: pyodbc.Cursor):
cursor.execute('create table t1(s varchar(20))')
cursor.execute('insert into t1 values(?)', '1')
row = cursor.execute('select * from t1').fetchone()
assert row[0] == '1'
assert row[-1] == '1'
def test_version():
assert 3 == len(pyodbc.version.split('.'))
@pytest.mark.skipif(IS_MSODBCSQL and SQLSERVER_YEAR < 2008, reason='Date not supported until 2008?')
def test_date(cursor: pyodbc.Cursor):
value = date.today()
cursor.execute('create table t1(d date)')
cursor.execute('insert into t1 values (?)', value)
result = cursor.execute('select d from t1').fetchone()[0]
assert isinstance(result, date)
assert value == result
@pytest.mark.skipif(IS_MSODBCSQL and SQLSERVER_YEAR < 2008, reason='Time not supported until 2008?')
def test_time(cursor: pyodbc.Cursor):
value = datetime.now().time()
value = value.replace(microsecond=0)
cursor.execute('create table t1(t time)')
cursor.execute('insert into t1 values (?)', value)
result = cursor.execute('select t from t1').fetchone()[0]
assert isinstance(result, time)
assert value == result
def test_datetime(cursor: pyodbc.Cursor):
value = datetime(2007, 1, 15, 3, 4, 5)
cursor.execute('create table t1(dt datetime)')
cursor.execute('insert into t1 values (?)', value)
result = cursor.execute('select dt from t1').fetchone()[0]
assert isinstance(result, datetime)
assert value == result
def test_datetime_fraction(cursor: pyodbc.Cursor):
value = datetime(2007, 1, 15, 3, 4, 5, 123000)
cursor.execute('create table t1(dt datetime)')
cursor.execute('insert into t1 values (?)', value)
result = cursor.execute('select dt from t1').fetchone()[0]
assert isinstance(result, datetime)
assert value == result
def test_datetime_fraction_rounded(cursor: pyodbc.Cursor):
full = datetime(2007, 1, 15, 3, 4, 5, 123456)
rounded = datetime(2007, 1, 15, 3, 4, 5, 123000)
cursor.execute('create table t1(dt datetime)')
cursor.execute('insert into t1 values (?)', full)
result = cursor.execute('select dt from t1').fetchone()[0]
assert isinstance(result, datetime)
assert rounded == result
def test_datetime2(cursor: pyodbc.Cursor):
value = datetime(2007, 1, 15, 3, 4, 5)
cursor.execute('create table t1(dt datetime2)')
cursor.execute('insert into t1 values (?)', value)
result = cursor.execute('select dt from t1').fetchone()[0]
assert isinstance(result, datetime)
assert value == result
def test_sp_results(cursor: pyodbc.Cursor):
cursor.execute(
"""
Create procedure proc1
AS
select top 10 name, id, xtype, refdate
from sysobjects
"""
)
rows = cursor.execute('exec proc1').fetchall()
assert isinstance(rows, list)
assert len(rows) == 10
assert isinstance(rows[0].refdate, datetime)
def test_sp_results_from_temp(cursor: pyodbc.Cursor):
cursor.execute(
"""
Create procedure proc1
AS
set nocount on
select top 10 name, id, xtype, refdate
into #tmptable
from sysobjects
select * from #tmptable
"""
)
cursor.execute('exec proc1')
assert cursor.description is not None
assert len(cursor.description) == 4
rows = cursor.fetchall()
assert isinstance(rows, list)
assert len(rows) == 10
assert isinstance(rows[0].refdate, datetime)
def test_sp_results_from_vartbl(cursor: pyodbc.Cursor):
cursor.execute(
"""
Create procedure proc1
AS
set nocount on
declare @tmptbl table(name varchar(100), id int, xtype varchar(4), refdate datetime)
insert into @tmptbl
select top 10 name, id, xtype, refdate
from sysobjects
select * from @tmptbl
"""
)
cursor.execute('exec proc1')
rows = cursor.fetchall()
assert isinstance(rows, list)
assert len(rows) == 10
assert isinstance(rows[0].refdate, datetime)
def test_sp_with_dates(cursor: pyodbc.Cursor):
cursor.execute(
"""
if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]')
and OBJECTPROPERTY(id, N'IsProcedure') = 1)
drop procedure [dbo].[test_sp]
"""
)
cursor.execute(
"""
create procedure test_sp(@d1 datetime, @d2 datetime)
AS
declare @d as int
set @d = datediff(year, @d1, @d2)
select @d
"""
)
cursor.execute('exec test_sp ?, ?', datetime.now(), datetime.now())
rows = cursor.fetchall()
assert rows is not None
assert rows[0][0] == 0
def test_sp_with_none(cursor: pyodbc.Cursor):
cursor.execute(
"""
if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]')
and OBJECTPROPERTY(id, N'IsProcedure') = 1)
drop procedure [dbo].[test_sp]
"""
)
cursor.execute(
"""
create procedure test_sp(@x varchar(20))
AS
declare @y varchar(20)
set @y = @x
select @y
"""
)
cursor.execute('exec test_sp ?', None)
rows = cursor.fetchall()
assert rows is not None
assert rows[0][0] is None
def test_rowcount_delete(cursor: pyodbc.Cursor):
assert cursor.rowcount == -1
cursor.execute('create table t1(i int)')
count = 4
for i in range(count):
cursor.execute('insert into t1 values (?)', i)
cursor.execute('delete from t1')
assert cursor.rowcount == count
def test_rowcount_select(cursor: pyodbc.Cursor):
"""
Ensure Cursor.rowcount is set properly after a select statement.
pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount, but SQL Server 2005
returns -1 after a select statement, so we'll test for that behavior. This is valid
behavior according to the DB API specification, but people don't seem to like it.
"""
cursor.execute('create table t1(i int)')
count = 4
for i in range(count):
cursor.execute('insert into t1 values (?)', i)
cursor.execute('select * from t1')
assert cursor.rowcount == -1
rows = cursor.fetchall()
assert len(rows) == count
assert cursor.rowcount == -1
def test_retcursor_delete(cursor: pyodbc.Cursor):
cursor.execute('create table t1(i int)')
cursor.execute('insert into t1 values (1)')
v = cursor.execute('delete from t1')
assert v == cursor
def test_retcursor_nodata(cursor: pyodbc.Cursor):
"""
This represents a different code path than a delete that deleted something.
The return value is SQL_NO_DATA and code after it was causing an error. We could use
SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount
code.
"""
cursor.execute('create table t1(i int)')
v = cursor.execute('delete from t1')
assert v == cursor
def test_retcursor_select(cursor: pyodbc.Cursor):
cursor.execute('create table t1(i int)')
cursor.execute('insert into t1 values (1)')
v = cursor.execute('select * from t1')
assert v == cursor
def table_with_spaces(cursor: pyodbc.Cursor):
"""Ensure we can select using [x z] syntax"""
try:
cursor.execute('create table [test one](int n)')
cursor.execute('insert into [test one] values(1)')
cursor.execute('select * from [test one]')
v = cursor.fetchone()[0]
assert v == 1
finally:
cursor.rollback()
def test_lower_case():
"""Ensure pyodbc.lowercase forces returned column names to lowercase."""
try:
pyodbc.lowercase = True
cnxn = connect()
cursor = cnxn.cursor()
cursor.execute('create table t1(Abc int, dEf int)')
cursor.execute('select * from t1')
names = [t[0] for t in cursor.description]
names.sort()
assert names == ['abc', 'def']
finally:
pyodbc.lowercase = False
def test_row_description(cursor: pyodbc.Cursor):
"""
Ensure Cursor.description is accessible as Row.cursor_description.
"""
cursor.execute('create table t1(a int, b char(3))')
cursor.execute("insert into t1 values(1, 'abc')")
row = cursor.execute('select * from t1').fetchone()
assert cursor.description == row.cursor_description
def test_temp_select(cursor: pyodbc.Cursor):
cursor.execute('create table t1(s char(7))')
cursor.execute('insert into t1 values(?)', 'testing')
v = cursor.execute('select * from t1').fetchone()[0]
assert isinstance(v, str)
assert v == 'testing'
cursor.execute('select s into t2 from t1')
v = cursor.execute('select * from t1').fetchone()[0]
assert isinstance(v, str)
assert v == 'testing'
def test_executemany(cursor: pyodbc.Cursor):
cursor.execute('create table t1(a int, b varchar(10))')
params = [(i, str(i)) for i in range(1, 6)]
cursor.executemany('insert into t1(a, b) values (?,?)', params)
count = cursor.execute('select count(*) from t1').fetchone()[0]
assert count == len(params)
cursor.execute('select a, b from t1 order by a')
rows = cursor.fetchall()
assert count == len(rows)
for param, row in zip(params, rows):
assert param[0] == row[0]
assert param[1] == row[1]
def test_executemany_one(cursor: pyodbc.Cursor):
"""Pass executemany a single sequence"""
cursor.execute('create table t1(a int, b varchar(10))')
params = [(1, 'test')]
cursor.executemany('insert into t1(a, b) values (?,?)', params)
count = cursor.execute('select count(*) from t1').fetchone()[0]
assert count == len(params)
cursor.execute('select a, b from t1 order by a')
rows = cursor.fetchall()
assert count == len(rows)
for param, row in zip(params, rows):
assert param[0] == row[0]
assert param[1] == row[1]
def test_executemany_dae_0(cursor: pyodbc.Cursor):
"""
DAE for 0-length value
"""
cursor.execute('create table t1(a nvarchar(max))')
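    # (max) columns are sent as data-at-execution (DAE) blocks when fast_executemany
    # is enabled; a zero-length value exercises that streaming path.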
cursor.fast_executemany = True
cursor.executemany('insert into t1(a) values(?)', [['']])
assert cursor.execute('select a from t1').fetchone()[0] == ''
cursor.fast_executemany = False
def test_row_slicing(cursor: pyodbc.Cursor):
cursor.execute('create table t1(a int, b int, c int, d int)')
cursor.execute('insert into t1 values(1,2,3,4)')
row = cursor.execute('select * from t1').fetchone()
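    # A slice covering the whole row returns the Row object itself; partial
    # slices are returned as plain tuples.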
result = row[:]
assert result is row
result = row[:-1]
assert result == (1, 2, 3)
result = row[0:4]
assert result is row
def test_row_repr(cursor: pyodbc.Cursor):
cursor.execute('create table t1(a int, b int, c int, d varchar(50))')
cursor.execute("insert into t1 values(1,2,3,'four')")
row = cursor.execute('select * from t1').fetchone()
result = str(row)
assert result == "(1, 2, 3, 'four')"
result = str(row[:-1])
assert result == '(1, 2, 3)'
result = str(row[:1])
assert result == '(1,)'
def test_concatenation(cursor: pyodbc.Cursor):
v2 = '0123456789' * 30
v3 = '9876543210' * 30
cursor.execute(
'create table t1(c1 int identity(1, 1), c2 varchar(300), c3 varchar(300))'
)
cursor.execute('insert into t1(c2, c3) values (?,?)', v2, v3)
row = cursor.execute('select c2, c3, c2 + c3 as both from t1').fetchone()
assert row.both == v2 + v3
def test_view_select(cursor: pyodbc.Cursor):
cursor.execute('create table t1(c1 int identity(1, 1), c2 varchar(50))')
for i in range(3):
cursor.execute('insert into t1(c2) values (?)', f'string{i}')
cursor.execute('create view t2 as select * from t1')
cursor.execute('select * from t2')
rows = cursor.fetchall()
assert rows is not None
assert len(rows) == 3
def test_autocommit():
cnxn = connect()
assert cnxn.autocommit is False
cnxn = None
cnxn = connect(autocommit=True)
assert cnxn.autocommit is True
cnxn.autocommit = False
assert cnxn.autocommit is False
def test_sqlserver_callproc(cursor: pyodbc.Cursor):
try:
cursor.execute('drop procedure pyodbctest')
cursor.commit()
except:
pass
cursor.execute('create table t1(s varchar(10))')
cursor.execute('insert into t1 values(?)', 'testing')
cursor.execute(
"""
create procedure pyodbctest @var1 varchar(32)
as
begin
select s from t1
return
end
"""
)
cursor.execute("exec pyodbctest 'hi'")
def test_skip(cursor: pyodbc.Cursor):
cursor.execute('create table t1(id int)')
for i in range(1, 5):
cursor.execute('insert into t1 values(?)', i)
cursor.execute('select id from t1 order by id')
assert cursor.fetchone()[0] == 1
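    # skip() discards the next two rows without returning them.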
cursor.skip(2)
assert cursor.fetchone()[0] == 4
def test_timeout():
cnxn = connect()
assert cnxn.timeout == 0
cnxn.timeout = 30
assert cnxn.timeout == 30
cnxn.timeout = 0
assert cnxn.timeout == 0
def test_sets_execute(cursor: pyodbc.Cursor):
cursor.execute('create table t1 (word varchar (100))')
words = {'a', 'b', 'c'}
with pytest.raises(pyodbc.ProgrammingError):
cursor.execute('insert into t1 (word) values (?)', words)
with pytest.raises(pyodbc.ProgrammingError):
cursor.executemany('insert into t1 (word) values (?)', words)
def test_row_execute(cursor: pyodbc.Cursor):
"""Ensure we can use a Row object as a parameter to execute"""
cursor.execute('create table t1(n int, s varchar(10))')
cursor.execute("insert into t1 values (1, 'a')")
row = cursor.execute('select n, s from t1').fetchone()
assert row
cursor.execute('create table t2(n int, s varchar(10))')
cursor.execute('insert into t2 values (?, ?)', row)
def test_row_executemany(cursor: pyodbc.Cursor):
"""Ensure we can use a Row object as a parameter to executemany"""
cursor.execute('create table t1(n int, s varchar(10))')
for i in range(3):
cursor.execute('insert into t1 values (?, ?)', i, chr(ord('a') + i))
rows = cursor.execute('select n, s from t1').fetchall()
assert len(rows) != 0
cursor.execute('create table t2(n int, s varchar(10))')
cursor.executemany('insert into t2 values (?, ?)', rows)
def test_description(cursor: pyodbc.Cursor):
"""Ensure cursor.description is correct"""
cursor.execute('create table t1(n int, s varchar(8), d decimal(5,2))')
cursor.execute("insert into t1 values (1, 'abc', '1.23')")
cursor.execute('select * from t1')
t = cursor.description[0]
assert t[0] == 'n'
assert t[1] == int
assert t[5] == 0
assert t[6] is True
t = cursor.description[1]
assert t[0] == 's'
assert t[1] == str
assert t[4] == 8
assert t[5] == 0
assert t[6] is True
t = cursor.description[2]
assert t[0] == 'd'
assert t[1] == Decimal
assert t[4] == 5
assert t[5] == 2
assert t[6] is True
def test_cursor_messages_with_print(cursor: pyodbc.Cursor):
"""
Ensure the Cursor.messages attribute is handled correctly with a simple PRINT statement.
"""
assert not cursor.messages
for msg in ('hello world', 'ABCDEFGHIJ' * 800):
cursor.execute(f"PRINT '{msg}'")
messages = cursor.messages
assert isinstance(messages, list)
assert len(messages) == 1
assert isinstance(messages[0], tuple)
assert len(messages[0]) == 2
assert isinstance(messages[0][0], str)
assert isinstance(messages[0][1], str)
assert '[01000] (0)' == messages[0][0]
assert messages[0][1].endswith(msg)
def test_cursor_messages_with_stored_proc(cursor: pyodbc.Cursor):
"""
Complex scenario to test the Cursor.messages attribute.
"""
cursor.execute(
"""
create or alter procedure test_cursor_messages as
begin
set nocount on;
print 'Message 1a';
print 'Message 1b';
select N'Field 1a' AS F UNION ALL SELECT N'Field 1b';
select N'Field 2a' AS F UNION ALL SELECT N'Field 2b';
print 'Message 2a';
print 'Message 2b';
end
"""
)
cursor.execute('exec test_cursor_messages')
vals = [row[0] for row in cursor.fetchall()]
assert vals == ['Field 1a', 'Field 1b']
    msgs = [re.search('Message \\d[ab]$', m[1]).group(0) for m in cursor.messages]
assert msgs == ['Message 1a', 'Message 1b']
assert cursor.nextset()
vals = [row[0] for row in cursor.fetchall()]
assert vals == ['Field 2a', 'Field 2b']
assert not cursor.messages
assert cursor.nextset()
with pytest.raises(pyodbc.ProgrammingError):
cursor.fetchall()
    msgs = [re.search('Message \\d[ab]$', m[1]).group(0) for m in cursor.messages]
assert msgs == ['Message 2a', 'Message 2b']
assert not cursor.nextset()
with pytest.raises(pyodbc.ProgrammingError):
cursor.fetchall()
assert not cursor.messages
def test_none_param(cursor: pyodbc.Cursor):
"""Ensure None can be used for params other than the first"""
cursor.execute('create table t1(n int, blob varbinary(max))')
cursor.execute('insert into t1 values (1, newid())')
row = cursor.execute('select * from t1').fetchone()
assert row.n == 1
assert isinstance(row.blob, bytes)
sql = 'update t1 set n=?, blob=?'
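    # FreeTDS may not be able to infer a type for the bare None parameter; on
    # DataError the fallback below declares it as SQL_VARBINARY via setinputsizes.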
try:
cursor.execute(sql, 2, None)
except pyodbc.DataError:
if IS_FREEDTS:
cursor.setinputsizes([(), (pyodbc.SQL_VARBINARY, None, None)])
cursor.execute(sql, 2, None)
else:
raise
row = cursor.execute('select * from t1').fetchone()
assert row.n == 2
assert row.blob is None
def test_output_conversion():
def convert1(value):
return 'X' + value.decode('latin1') + 'X'
def convert2(value):
return 'Y' + value.decode('latin1') + 'Y'
cnxn = connect()
cursor = cnxn.cursor()
cursor.execute('create table t1(n int, v varchar(10))')
cursor.execute("insert into t1 values (1, '123.45')")
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)
value = cursor.execute('select v from t1').fetchone()[0]
assert value == 'X123.45X'
cnxn.clear_output_converters()
value = cursor.execute('select v from t1').fetchone()[0]
assert value == '123.45'
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)
value = cursor.execute('select v from t1').fetchone()[0]
assert value == 'X123.45X'
cnxn.remove_output_converter(pyodbc.SQL_VARCHAR)
value = cursor.execute('select v from t1').fetchone()[0]
assert value == '123.45'
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)
value = cursor.execute('select v from t1').fetchone()[0]
assert value == 'X123.45X'
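    # Registering None as the converter should behave like removing it for this SQL type.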
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, None)
value = cursor.execute('select v from t1').fetchone()[0]
assert value == '123.45'
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)
value = cursor.execute('select v from t1').fetchone()[0]
assert value == 'X123.45X'
prev_converter = cnxn.get_output_converter(pyodbc.SQL_VARCHAR)
assert prev_converter is not None
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert2)
value = cursor.execute('select v from t1').fetchone()[0]
assert value == 'Y123.45Y'
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, prev_converter)
value = cursor.execute('select v from t1').fetchone()[0]
assert value == 'X123.45X'
cnxn.clear_output_converters()
value = cursor.execute('select v from t1').fetchone()[0]
assert value == '123.45'
prev_converter = cnxn.get_output_converter(pyodbc.SQL_VARCHAR)
assert prev_converter is None
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert2)
value = cursor.execute('select v from t1').fetchone()[0]
assert value == 'Y123.45Y'
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, prev_converter)
value = cursor.execute('select v from t1').fetchone()[0]
assert value == '123.45'
def test_too_large(cursor: pyodbc.Cursor):
"""Ensure error raised if insert fails due to truncation"""
value = 'x' * 1000
cursor.execute('create table t1(s varchar(800))')
with pytest.raises(pyodbc.Error):
cursor.execute('insert into t1 values (?)', value)
def test_row_equal(cursor: pyodbc.Cursor):
cursor.execute('create table t1(n int, s varchar(20))')
cursor.execute("insert into t1 values (1, 'test')")
row1 = cursor.execute('select n, s from t1').fetchone()
row2 = cursor.execute('select n, s from t1').fetchone()
assert row1 == row2
def test_row_gtlt(cursor: pyodbc.Cursor):
cursor.execute('create table t1(n int, s varchar(20))')
cursor.execute("insert into t1 values (1, 'test1')")
cursor.execute("insert into t1 values (1, 'test2')")
rows = cursor.execute('select n, s from t1 order by s').fetchall()
assert rows[0] < rows[1]
assert rows[0] <= rows[1]
assert rows[1] > rows[0]
assert rows[1] >= rows[0]
assert rows[0] != rows[1]
rows = list(rows)
rows.sort()
def test_context_manager_success():
"""Ensure `with` commits if an exception is not raised"""
cnxn = connect()
cursor = cnxn.cursor()
cursor.execute('create table t1(n int)')
cnxn.commit()
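    # The connection context manager commits on success (and rolls back on error);
    # it does not close the connection.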
with cnxn:
cursor.execute('insert into t1 values (1)')
rows = cursor.execute('select n from t1').fetchall()
assert len(rows) == 1
assert rows[0][0] == 1
def test_context_manager_failure(cursor: pyodbc.Cursor):
"""Ensure `with` rolls back if an exception is raised"""
cnxn = connect()
cursor = cnxn.cursor()
cursor.execute('create table t1(n int)')
cursor.execute('insert into t1 values (1)')
cnxn.commit()
with pytest.raises(pyodbc.Error):
with cnxn:
cursor.execute('insert into t1 values (2)')
cursor.execute('delete from bogus')
cursor.execute('select max(n) from t1')
val = cursor.fetchval()
assert val == 1
def test_untyped_none(cursor: pyodbc.Cursor):
value = cursor.execute('select ?', None).fetchone()[0]
assert value is None
def test_large_update_nodata(cursor: pyodbc.Cursor):
cursor.execute('create table t1(a varbinary(max))')
hundredkb = b'x' * 100 * 1024
cursor.execute('update t1 set a=? where 1=0', (hundredkb,))
def test_func_param(cursor: pyodbc.Cursor):
try:
cursor.execute('drop function func1')
except:
pass
cursor.execute(
"""
create function func1 (@testparam varchar(4))
returns @rettest table (param varchar(4))
as
begin
insert @rettest
select @testparam
return
end
"""
)
cursor.commit()
value = cursor.execute('select * from func1(?)', 'test').fetchone()[0]
assert value == 'test'
def test_columns(cursor: pyodbc.Cursor):
cursor.execute('create table t1(a int, b varchar(3), xΏz varchar(4))')
cursor.columns('t1')
results = {row.column_name: row for row in cursor}
row = results['a']
assert row.type_name == 'int', row.type_name
row = results['b']
assert row.type_name == 'varchar'
assert row.column_size == 3
cursor.columns('t1', schema=None, catalog=None)
results = {row.column_name: row for row in cursor}
row = results['a']
assert row.type_name == 'int', row.type_name
row = results['b']
assert row.type_name == 'varchar'
assert row.column_size == 3
row = results['xΏz']
assert row.type_name == 'varchar'
assert row.column_size == 4, row.column_size
for i in range(8, 16):
table_name = 'pyodbc_89abcdef'[:i]
cursor.execute(
f"""
IF OBJECT_ID (N'{table_name}', N'U') IS NOT NULL DROP TABLE {table_name};
CREATE TABLE {table_name} (id INT PRIMARY KEY);
"""
)
        col_count = len([col.column_name for col in cursor.columns(table_name)])
assert col_count == 1
cursor.execute(f'drop table {table_name}')
def test_emoticons_as_parameter(cursor: pyodbc.Cursor):
v = 'x 🌜 z'
cursor.execute('create table t1(s nvarchar(100))')
cursor.execute('insert into t1 values (?)', v)
result = cursor.execute('select s from t1').fetchone()[0]
assert result == v
def test_emoticons_as_literal(cursor: pyodbc.Cursor):
v = 'x 🌜 z'
cursor.execute('create table t1(s nvarchar(100))')
cursor.execute(f"insert into t1 values (N'{v}')")
result = cursor.execute('select s from t1').fetchone()[0]
assert result == v
def _test_tvp(cursor: pyodbc.Cursor, diff_schema):
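    # Round-trips a table-valued parameter: creates a user-defined table type and a
    # procedure that simply selects from the TVP, then passes typical, minimal, and
    # maximal rows (optionally under a dedicated schema) and compares the echoed result.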
pyodbc.native_uuid = True
procname = 'SelectTVP'
typename = 'TestTVP'
if diff_schema:
schemaname = 'myschema'
procname = schemaname + '.' + procname
typenameonly = typename
typename = schemaname + '.' + typename
try:
cursor.execute('drop procedure ' + procname)
except:
pass
try:
cursor.execute('drop type ' + typename)
except:
pass
if diff_schema:
try:
cursor.execute('drop schema ' + schemaname)
except:
pass
cursor.commit()
if diff_schema:
cursor.execute('CREATE SCHEMA myschema')
cursor.commit()
cursor.execute(
f"""
CREATE TYPE {typename} AS TABLE(
c01 VARCHAR(255),
c02 VARCHAR(MAX),
c03 VARBINARY(255),
c04 VARBINARY(MAX),
c05 BIT,
c06 DATE,
c07 TIME,
c08 DATETIME2(5),
c09 BIGINT,
c10 FLOAT,
c11 NUMERIC(38, 24),
c12 UNIQUEIDENTIFIER)
"""
)
cursor.commit()
cursor.execute(
f"""
CREATE PROCEDURE {procname} @TVP {typename} READONLY
AS SELECT * FROM @TVP;
"""
)
cursor.commit()
VERY_LONG_LEN = 2000000
long_string = ''.join(chr(i) for i in range(32, 127))
long_bytearray = bytes(list(range(255)))
very_long_string = long_string * (VERY_LONG_LEN // len(long_string))
    very_long_bytearray = long_bytearray * (VERY_LONG_LEN // len(long_bytearray))
    params = [
        ('abc', 'abc', bytes([209, 206, 250, 206]), bytes([15, 241, 206, 202, 254]),
         True, date(1997, 8, 29), time(9, 13, 39), datetime(2018, 11, 13, 13, 33, 26, 298420),
         1234567, 3.14, Decimal('31234567890123.141243449787580175325274'),
         uuid.UUID('4fe34a93-e574-04cc-200a-353f0d1770b1')),
        ('', '', bytes([0, 1, 2, 3, 4]), bytes([0, 1, 2, 3, 4, 5]),
         False, date(1, 1, 1), time(0, 0, 0), datetime(1, 1, 1, 0, 0, 0, 0),
         -9223372036854775808, -1.79e+308, Decimal('0.000000000000000000000001'),
         uuid.UUID('33f7504c-2bac-1b83-01d1-7434a7ba6a17')),
        (long_string, very_long_string, bytes(long_bytearray), bytes(very_long_bytearray),
         True, date(9999, 12, 31), time(23, 59, 59), datetime(9999, 12, 31, 23, 59, 59, 999990),
         9223372036854775807, 1.79e+308, Decimal('99999999999999.999999999999999999999999'),
         uuid.UUID('ffffffff-ffff-ffff-ffff-ffffffffffff'))]
if diff_schema:
p1 = [[typenameonly, schemaname] + params]
else:
p1 = [params]
result_array = [tuple(row) for row in cursor.execute(
f'exec {procname} ?', p1).fetchall()]
for row, param in zip(result_array, params):
if row != param:
for r, p in zip(row, param):
assert r == p
params = []
p1 = [params]
if diff_schema:
p1 = [[typenameonly, schemaname] + params]
else:
p1 = [params]
result_array = cursor.execute(f'exec {procname} ?', p1).fetchall()
assert result_array == params
@pytest.mark.skipif(IS_FREEDTS, reason='FreeTDS does not support TVP')
def test_tvp(cursor: pyodbc.Cursor):
_test_tvp(cursor, False)
@pytest.mark.skipif(IS_FREEDTS, reason='FreeTDS does not support TVP')
def test_tvp_diffschema(cursor: pyodbc.Cursor):
_test_tvp(cursor, True)
def get_sqlserver_version(cursor: pyodbc.Cursor):
"""
Returns the major version: 8-->2000, 9-->2005, 10-->2008
"""
cursor.execute("exec master..xp_msver 'ProductVersion'")
row = cursor.fetchone()
return int(row.Character_Value.split('.', 1)[0])
@lru_cache()
def _generate_str(length, encoding=None):
"""
Returns either a string or bytes, depending on whether encoding is provided,
that is `length` elements long.
If length is None, None is returned. This simplifies the tests by letting us put None into
an array of other lengths and pass them here, moving the special case check into one place.
"""
if length is None:
return None
v = 'á'
remaining = max(0, length - len(v))
if remaining:
seed = '0123456789-abcdefghijklmnopqrstuvwxyz-'
if remaining <= len(seed):
v += seed
else:
            c = (remaining + len(seed) - 1) // len(seed)
v += seed * c
if encoding:
v = v.encode(encoding)
v = v[:length]
return v
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os, uuid, re, sys
from decimal import Decimal
from datetime import date, time, datetime
from functools import lru_cache
from typing import Iterator
import pyodbc, pytest
# WARNING: Wow Microsoft always manages to do the stupidest thing possible always trying to be
# smarter than everyone. I worked with their APIs since before "OLE" and it has always
# been a nanny state. They won't read the UID and PWD from odbc.ini because it isn't secure.
# Really? Less secure than what? The next hack someone is going to use. Do the
# straightforward thing and explain how to secure it. It isn't their business how I deploy and secure.
#
# For every other DB we use a single default DSN but you can pass your own via an environment
# variable. For SS, we can't just use a default DSN unless you want to go trusted. (Which is
# more secure? No.) It'll be put into .bashrc most likely. Way to go. Now I'll go rename
# all of the others to DB specific names instead of PYODBC_CNXNSTR. Hot garbage as usual.
CNXNSTR = os.environ.get('PYODBC_SQLSERVER', 'DSN=pyodbc-sqlserver')
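# For example (an illustrative connection string only, adjust for your environment):
#   export PYODBC_SQLSERVER="Driver={ODBC Driver 18 for SQL Server};Server=localhost;Database=test;UID=sa;PWD=...;TrustServerCertificate=yes"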
def connect(autocommit=False, attrs_before=None):
return pyodbc.connect(CNXNSTR, autocommit=autocommit, attrs_before=attrs_before)
DRIVER = connect().getinfo(pyodbc.SQL_DRIVER_NAME)
IS_FREEDTS = bool(re.search('tsodbc', DRIVER, flags=re.IGNORECASE))
IS_MSODBCSQL = bool(re.search(r'(msodbcsql|sqlncli|sqlsrv32\.dll)', DRIVER, re.IGNORECASE))
def _get_sqlserver_year():
"""
Returns the release year of the current version of SQL Server, used to skip tests for
features that are not supported. If the current DB is not SQL Server, 0 is returned.
"""
# We used to use the major version, but most documentation on the web refers to the year
# (e.g. SQL Server 2019) so we'll use that for skipping tests that do not apply.
if not IS_MSODBCSQL:
return 0
cnxn = connect()
cursor = cnxn.cursor()
row = cursor.execute("exec master..xp_msver 'ProductVersion'").fetchone()
major = row.Character_Value.split('.', 1)[0]
return {
# https://sqlserverbuilds.blogspot.com/
'8': 2000, '9': 2005, '10': 2008, '11': 2012, '12': 2014,
'13': 2016, '14': 2017, '15': 2019, '16': 2022
}[major]
SQLSERVER_YEAR = _get_sqlserver_year()
@pytest.fixture()
def cursor() -> Iterator[pyodbc.Cursor]:
cnxn = connect()
cur = cnxn.cursor()
cur.execute("drop table if exists t1")
cur.execute("drop table if exists t2")
cur.execute("drop table if exists t3")
cnxn.commit()
yield cur
if not cnxn.closed:
cur.close()
cnxn.close()
def test_text(cursor: pyodbc.Cursor):
_test_vartype(cursor, 'text')
def test_varchar(cursor: pyodbc.Cursor):
_test_vartype(cursor, 'varchar')
def test_nvarchar(cursor: pyodbc.Cursor):
_test_vartype(cursor, 'nvarchar')
def test_varbinary(cursor: pyodbc.Cursor):
_test_vartype(cursor, 'varbinary')
@pytest.mark.skipif(SQLSERVER_YEAR < 2005, reason='(max) not supported until 2005')
def test_unicode_longmax(cursor: pyodbc.Cursor):
# Issue 188: Segfault when fetching NVARCHAR(MAX) data over 511 bytes
cursor.execute("select cast(replicate(N'x', 512) as nvarchar(max))")
def test_char(cursor: pyodbc.Cursor):
value = "testing"
cursor.execute("create table t1(s char(7))")
cursor.execute("insert into t1 values(?)", "testing")
v = cursor.execute("select * from t1").fetchone()[0]
assert v == value
def test_int(cursor: pyodbc.Cursor):
_test_scalar(cursor, 'int', [None, -1, 0, 1, 12345678])
def test_bigint(cursor: pyodbc.Cursor):
_test_scalar(cursor, 'bigint', [None, -1, 0, 1, 0x123456789, 0x7FFFFFFF, 0xFFFFFFFF,
0x123456789])
def test_overflow_int(cursor: pyodbc.Cursor):
# python allows integers of any size, bigger than an 8 byte int can contain
input = 9999999999999999999999999999999999999
cursor.execute("create table t1(d bigint)")
with pytest.raises(OverflowError):
cursor.execute("insert into t1 values (?)", input)
result = cursor.execute("select * from t1").fetchall()
assert result == []
def test_float(cursor: pyodbc.Cursor):
_test_scalar(cursor, 'float', [None, -200, -1, 0, 1, 1234.5, -200, .00012345])
def test_non_numeric_float(cursor: pyodbc.Cursor):
cursor.execute("create table t1(d float)")
for input in (float('+Infinity'), float('-Infinity'), float('NaN')):
with pytest.raises(pyodbc.ProgrammingError):
cursor.execute("insert into t1 values (?)", input)
def test_drivers():
p = pyodbc.drivers()
assert isinstance(p, list)
def test_datasources():
p = pyodbc.dataSources()
assert isinstance(p, dict)
def test_getinfo_string():
cnxn = connect()
value = cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR)
assert isinstance(value, str)
def test_getinfo_bool():
cnxn = connect()
value = cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES)
assert isinstance(value, bool)
def test_getinfo_int():
cnxn = connect()
value = cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION)
assert isinstance(value, int)
def test_getinfo_smallint():
cnxn = connect()
value = cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR)
assert isinstance(value, int)
def test_no_fetch(cursor: pyodbc.Cursor):
# Issue 89 with FreeTDS: Multiple selects (or catalog functions that issue selects) without
# fetches seem to confuse the driver.
cursor.execute('select 1')
cursor.execute('select 1')
cursor.execute('select 1')
def test_decode_meta(cursor: pyodbc.Cursor):
"""
Ensure column names with non-ASCII characters are converted using the configured encodings.
"""
# This is from GitHub issue #190
cursor.execute("create table t1(a int)")
cursor.execute("insert into t1 values (1)")
cursor.execute('select a as "Tipología" from t1')
assert cursor.description[0][0] == "Tipología"
def test_exc_integrity(cursor: pyodbc.Cursor):
"Make sure an IntegretyError is raised"
# This is really making sure we are properly encoding and comparing the SQLSTATEs.
cursor.execute("create table t1(s1 varchar(10) primary key)")
cursor.execute("insert into t1 values ('one')")
with pytest.raises(pyodbc.IntegrityError):
cursor.execute("insert into t1 values ('one')")
def test_multiple_bindings(cursor: pyodbc.Cursor):
"More than one bind and select on a cursor"
cursor.execute("create table t1(n int)")
cursor.execute("insert into t1 values (?)", 1)
cursor.execute("insert into t1 values (?)", 2)
cursor.execute("insert into t1 values (?)", 3)
for _ in range(3):
cursor.execute("select n from t1 where n < ?", 10)
cursor.execute("select n from t1 where n < 3")
def test_different_bindings(cursor: pyodbc.Cursor):
cursor.execute("create table t1(n int)")
cursor.execute("create table t2(d datetime)")
cursor.execute("insert into t1 values (?)", 1)
cursor.execute("insert into t2 values (?)", datetime.now())
SMALL_FENCEPOST_SIZES = [None, 0, 1, 255, 256, 510, 511, 512, 1023, 1024, 2047, 2048, 4000]
LARGE_FENCEPOST_SIZES = SMALL_FENCEPOST_SIZES + [4095, 4096, 4097, 10 * 1024, 20 * 1024]
def _test_vartype(cursor: pyodbc.Cursor, datatype):
if datatype == 'text':
lengths = LARGE_FENCEPOST_SIZES
else:
lengths = SMALL_FENCEPOST_SIZES
if datatype == 'text':
cursor.execute(f"create table t1(c1 {datatype})")
else:
maxlen = lengths[-1]
cursor.execute(f"create table t1(c1 {datatype}({maxlen}))")
for length in lengths:
cursor.execute("delete from t1")
        encoding = 'utf8' if datatype in ('blob', 'varbinary') else None
value = _generate_str(length, encoding=encoding)
try:
cursor.execute("insert into t1 values(?)", value)
except pyodbc.Error as ex:
msg = f'{datatype} insert failed: length={length} len={len(value)}'
raise Exception(msg) from ex
v = cursor.execute("select * from t1").fetchone()[0]
assert v == value
def _test_scalar(cursor: pyodbc.Cursor, datatype, values):
"""
A simple test wrapper for types that are identical when written and read.
"""
cursor.execute(f"create table t1(c1 {datatype})")
for value in values:
cursor.execute("delete from t1")
cursor.execute("insert into t1 values (?)", value)
v = cursor.execute("select c1 from t1").fetchone()[0]
assert v == value
def test_noscan(cursor: pyodbc.Cursor):
assert cursor.noscan is False
cursor.noscan = True
assert cursor.noscan is True
def test_nonnative_uuid(cursor: pyodbc.Cursor):
# The default is False meaning we should return a string. Note that
# SQL Server seems to always return uppercase.
value = uuid.uuid4()
cursor.execute("create table t1(n uniqueidentifier)")
cursor.execute("insert into t1 values (?)", value)
pyodbc.native_uuid = False
result = cursor.execute("select n from t1").fetchval()
assert isinstance(result, str)
assert result == str(value).upper()
pyodbc.native_uuid = True
def test_native_uuid(cursor: pyodbc.Cursor):
# When true, we should return a uuid.UUID object.
value = uuid.uuid4()
cursor.execute("create table t1(n uniqueidentifier)")
cursor.execute("insert into t1 values (?)", value)
pyodbc.native_uuid = True
result = cursor.execute("select n from t1").fetchval()
assert isinstance(result, uuid.UUID)
assert value == result
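# Editor's sketch: pyodbc.native_uuid is module-level state, so tests like the two above can
# leak the setting into later tests. A hypothetical context manager keeps the change scoped.
import contextlib
@contextlib.contextmanager
def scoped_native_uuid(enabled: bool):
    """Temporarily set pyodbc.native_uuid and restore the previous value on exit."""
    previous = pyodbc.native_uuid
    pyodbc.native_uuid = enabled
    try:
        yield
    finally:
        pyodbc.native_uuid = previous
# with scoped_native_uuid(False):
#     ...  # fetched uniqueidentifier columns come back as (uppercase) strings here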
def test_nextset(cursor: pyodbc.Cursor):
cursor.execute("create table t1(i int)")
for i in range(4):
cursor.execute("insert into t1(i) values(?)", i)
cursor.execute(
"""
select i from t1 where i < 2 order by i;
select i from t1 where i >= 2 order by i
""")
for i, row in enumerate(cursor):
assert i == row.i
assert cursor.nextset()
for i, row in enumerate(cursor):
assert i + 2 == row.i
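# Editor's sketch: a hypothetical helper that drains every result set of a batch using
# Cursor.nextset(), exactly as the loop above does by hand.
def iter_result_sets(cursor):
    """Yield the rows of each result set produced by the most recent execute()."""
    while True:
        yield cursor.fetchall()
        if not cursor.nextset():
            break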
@pytest.mark.skipif(IS_FREEDTS, reason='https://github.com/FreeTDS/freetds/issues/230')
def test_nextset_with_raiserror(cursor: pyodbc.Cursor):
cursor.execute("select i = 1; RAISERROR('c', 16, 1);")
row = next(cursor)
assert 1 == row.i
with pytest.raises(pyodbc.ProgrammingError):
cursor.nextset()
def test_fixed_unicode(cursor: pyodbc.Cursor):
value = "t\xebsting"
cursor.execute("create table t1(s nchar(7))")
cursor.execute("insert into t1 values(?)", "t\xebsting")
v = cursor.execute("select * from t1").fetchone()[0]
assert isinstance(v, str)
assert len(v) == len(value)
# If we alloc'd wrong, the test below might work because of an embedded NULL
assert v == value
def test_chinese(cursor: pyodbc.Cursor):
v = '我的'
cursor.execute("SELECT N'我的' AS [Name]")
row = cursor.fetchone()
assert row[0] == v
cursor.execute("SELECT N'我的' AS [Name]")
rows = cursor.fetchall()
assert rows[0][0] == v
def test_bit(cursor: pyodbc.Cursor):
value = True
cursor.execute("create table t1(b bit)")
cursor.execute("insert into t1 values (?)", value)
v = cursor.execute("select b from t1").fetchone()[0]
assert isinstance(v, bool)
assert v == value
def test_decimal(cursor: pyodbc.Cursor):
# From test provided by planders (thanks!) in Issue 91
for (precision, scale, negative) in [
(1, 0, False), (1, 0, True), (6, 0, False), (6, 2, False), (6, 4, True),
(6, 6, True), (38, 0, False), (38, 10, False), (38, 38, False), (38, 0, True),
(38, 10, True), (38, 38, True)]:
try:
cursor.execute("drop table t1")
except:
pass
cursor.execute(f"create table t1(d decimal({precision}, {scale}))")
# Construct a decimal that uses the maximum precision and scale.
        sign = '-' if negative else ''
        before = '9' * (precision - scale)
        after = ('.' + '9' * scale) if scale else ''
decStr = f'{sign}{before}{after}'
value = Decimal(decStr)
cursor.execute("insert into t1 values(?)", value)
v = cursor.execute("select d from t1").fetchone()[0]
assert v == value
def test_decimal_e(cursor: pyodbc.Cursor):
"""Ensure exponential notation decimals are properly handled"""
value = Decimal((0, (1, 2, 3), 5)) # prints as 1.23E+7
cursor.execute("create table t1(d decimal(10, 2))")
cursor.execute("insert into t1 values (?)", value)
result = cursor.execute("select * from t1").fetchone()[0]
assert result == value
def test_subquery_params(cursor: pyodbc.Cursor):
"""Ensure parameter markers work in a subquery"""
cursor.execute("create table t1(id integer, s varchar(20))")
cursor.execute("insert into t1 values (?,?)", 1, 'test')
row = cursor.execute("""
select x.id
from (
select id
from t1
where s = ?
and id between ? and ?
) x
""", 'test', 1, 10).fetchone()
assert row is not None
assert row[0] == 1
def test_close_cnxn():
"""Make sure using a Cursor after closing its connection doesn't crash."""
cnxn = connect()
cursor = cnxn.cursor()
cursor.execute("drop table if exists t1")
cursor.execute("create table t1(id integer, s varchar(20))")
cursor.execute("insert into t1 values (?,?)", 1, 'test')
cursor.execute("select * from t1")
cnxn.close()
# Now that the connection is closed, we expect an exception. (If the code attempts to use
# the HSTMT, we'll get an access violation instead.)
with pytest.raises(pyodbc.ProgrammingError):
cursor.execute("select * from t1")
def test_empty_string(cursor: pyodbc.Cursor):
cursor.execute("create table t1(s varchar(20))")
cursor.execute("insert into t1 values(?)", "")
def test_empty_string_encoding():
cnxn = connect()
cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='shift_jis')
value = ""
cursor = cnxn.cursor()
cursor.execute("create table t1(s varchar(20))")
cursor.execute("insert into t1 values(?)", value)
v = cursor.execute("select * from t1").fetchone()[0]
assert v == value
def test_fixed_str(cursor: pyodbc.Cursor):
value = "testing"
cursor.execute("create table t1(s char(7))")
cursor.execute("insert into t1 values(?)", value)
v = cursor.execute("select * from t1").fetchone()[0]
assert isinstance(v, str)
assert len(v) == len(value)
# If we alloc'd wrong, the test below might work because of an embedded NULL
assert v == value
def test_empty_unicode(cursor: pyodbc.Cursor):
cursor.execute("create table t1(s nvarchar(20))")
cursor.execute("insert into t1 values(?)", "")
def test_empty_unicode_encoding():
cnxn = connect()
cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='shift_jis')
value = ""
cursor = cnxn.cursor()
cursor.execute("create table t1(s nvarchar(20))")
cursor.execute("insert into t1 values(?)", value)
v = cursor.execute("select * from t1").fetchone()[0]
assert v == value
def test_negative_row_index(cursor: pyodbc.Cursor):
cursor.execute("create table t1(s varchar(20))")
cursor.execute("insert into t1 values(?)", "1")
row = cursor.execute("select * from t1").fetchone()
assert row[0] == "1"
assert row[-1] == "1"
def test_version():
assert 3 == len(pyodbc.version.split('.')) # 1.3.1 etc.
@pytest.mark.skipif(IS_MSODBCSQL and SQLSERVER_YEAR < 2008,
reason='Date not supported until 2008?')
def test_date(cursor: pyodbc.Cursor):
value = date.today()
cursor.execute("create table t1(d date)")
cursor.execute("insert into t1 values (?)", value)
result = cursor.execute("select d from t1").fetchone()[0]
assert isinstance(result, date)
assert value == result
@pytest.mark.skipif(IS_MSODBCSQL and SQLSERVER_YEAR < 2008,
reason='Time not supported until 2008?')
def test_time(cursor: pyodbc.Cursor):
value = datetime.now().time()
# We aren't yet writing values using the new extended time type so the value written to the
# database is only down to the second.
value = value.replace(microsecond=0)
cursor.execute("create table t1(t time)")
cursor.execute("insert into t1 values (?)", value)
result = cursor.execute("select t from t1").fetchone()[0]
assert isinstance(result, time)
assert value == result
def test_datetime(cursor: pyodbc.Cursor):
value = datetime(2007, 1, 15, 3, 4, 5)
cursor.execute("create table t1(dt datetime)")
cursor.execute("insert into t1 values (?)", value)
result = cursor.execute("select dt from t1").fetchone()[0]
assert isinstance(result, datetime)
assert value == result
def test_datetime_fraction(cursor: pyodbc.Cursor):
    # SQL Server's datetime type only supports milliseconds, while Python's datetime supports
    # microseconds, so the most granular value that survives the round trip is xxx000.
value = datetime(2007, 1, 15, 3, 4, 5, 123000)
cursor.execute("create table t1(dt datetime)")
cursor.execute("insert into t1 values (?)", value)
result = cursor.execute("select dt from t1").fetchone()[0]
assert isinstance(result, datetime)
assert value == result
def test_datetime_fraction_rounded(cursor: pyodbc.Cursor):
    # SQL Server's datetime type only supports milliseconds, while Python's datetime supports
    # microseconds. pyodbc rounds down to what the database supports.
full = datetime(2007, 1, 15, 3, 4, 5, 123456)
rounded = datetime(2007, 1, 15, 3, 4, 5, 123000)
cursor.execute("create table t1(dt datetime)")
cursor.execute("insert into t1 values (?)", full)
result = cursor.execute("select dt from t1").fetchone()[0]
assert isinstance(result, datetime)
assert rounded == result
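# Editor's sketch: because a datetime column only keeps milliseconds, callers who want exact
# round trips can truncate before inserting. The helper is hypothetical.
def truncate_to_millis(dt: datetime) -> datetime:
    """Drop sub-millisecond precision so the value survives a datetime column unchanged."""
    return dt.replace(microsecond=(dt.microsecond // 1000) * 1000)
# truncate_to_millis(datetime(2007, 1, 15, 3, 4, 5, 123456)) == datetime(2007, 1, 15, 3, 4, 5, 123000)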
def test_datetime2(cursor: pyodbc.Cursor):
value = datetime(2007, 1, 15, 3, 4, 5)
cursor.execute("create table t1(dt datetime2)")
cursor.execute("insert into t1 values (?)", value)
result = cursor.execute("select dt from t1").fetchone()[0]
assert isinstance(result, datetime)
assert value == result
def test_sp_results(cursor: pyodbc.Cursor):
cursor.execute(
"""
Create procedure proc1
AS
select top 10 name, id, xtype, refdate
from sysobjects
""")
rows = cursor.execute("exec proc1").fetchall()
assert isinstance(rows, list)
assert len(rows) == 10 # there has to be at least 10 items in sysobjects
assert isinstance(rows[0].refdate, datetime)
def test_sp_results_from_temp(cursor: pyodbc.Cursor):
# Note: I've used "set nocount on" so that we don't get the number of rows deleted from
# #tmptable. If you don't do this, you'd need to call nextset() once to skip it.
cursor.execute(
"""
Create procedure proc1
AS
set nocount on
select top 10 name, id, xtype, refdate
into #tmptable
from sysobjects
select * from #tmptable
""")
cursor.execute("exec proc1")
assert cursor.description is not None
assert len(cursor.description) == 4
rows = cursor.fetchall()
assert isinstance(rows, list)
assert len(rows) == 10 # there has to be at least 10 items in sysobjects
assert isinstance(rows[0].refdate, datetime)
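# Editor's sketch: if a procedure does *not* "set nocount on", the DML statements inside it
# produce count-only results first, and the caller has to step past them with nextset() before
# reaching the rows. A hypothetical helper for that case:
def first_rowset(cursor):
    """Advance past count-only results and return the rows of the first real result set."""
    while cursor.description is None:
        if not cursor.nextset():
            return []
    return cursor.fetchall()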
def test_sp_results_from_vartbl(cursor: pyodbc.Cursor):
cursor.execute(
"""
Create procedure proc1
AS
set nocount on
declare @tmptbl table(name varchar(100), id int, xtype varchar(4), refdate datetime)
insert into @tmptbl
select top 10 name, id, xtype, refdate
from sysobjects
select * from @tmptbl
""")
cursor.execute("exec proc1")
rows = cursor.fetchall()
assert isinstance(rows, list)
assert len(rows) == 10 # there has to be at least 10 items in sysobjects
assert isinstance(rows[0].refdate, datetime)
def test_sp_with_dates(cursor: pyodbc.Cursor):
# Reported in the forums that passing two datetimes to a stored procedure doesn't work.
cursor.execute(
"""
if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]')
and OBJECTPROPERTY(id, N'IsProcedure') = 1)
drop procedure [dbo].[test_sp]
""")
cursor.execute(
"""
create procedure test_sp(@d1 datetime, @d2 datetime)
AS
declare @d as int
set @d = datediff(year, @d1, @d2)
select @d
""")
cursor.execute("exec test_sp ?, ?", datetime.now(), datetime.now())
rows = cursor.fetchall()
assert rows is not None
assert rows[0][0] == 0 # 0 years apart
def test_sp_with_none(cursor: pyodbc.Cursor):
# Reported in the forums that passing None caused an error.
cursor.execute(
"""
if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]')
and OBJECTPROPERTY(id, N'IsProcedure') = 1)
drop procedure [dbo].[test_sp]
""")
cursor.execute(
"""
create procedure test_sp(@x varchar(20))
AS
declare @y varchar(20)
set @y = @x
select @y
""")
cursor.execute("exec test_sp ?", None)
rows = cursor.fetchall()
assert rows is not None
    assert rows[0][0] is None   # the None parameter should come back as NULL
#
# rowcount
#
def test_rowcount_delete(cursor: pyodbc.Cursor):
assert cursor.rowcount == -1
cursor.execute("create table t1(i int)")
count = 4
for i in range(count):
cursor.execute("insert into t1 values (?)", i)
cursor.execute("delete from t1")
assert cursor.rowcount == count
def test_rowcount_nodata(cursor: pyodbc.Cursor):
"""
This represents a different code path than a delete that deleted something.
The return value is SQL_NO_DATA and code after it was causing an error. We could use
SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount
code. On the other hand, we could hardcode a zero return value.
"""
cursor.execute("create table t1(i int)")
# This is a different code path internally.
cursor.execute("delete from t1")
assert cursor.rowcount == 0
def test_rowcount_select(cursor: pyodbc.Cursor):
"""
Ensure Cursor.rowcount is set properly after a select statement.
pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount, but SQL Server 2005
returns -1 after a select statement, so we'll test for that behavior. This is valid
behavior according to the DB API specification, but people don't seem to like it.
"""
cursor.execute("create table t1(i int)")
count = 4
for i in range(count):
cursor.execute("insert into t1 values (?)", i)
cursor.execute("select * from t1")
assert cursor.rowcount == -1
rows = cursor.fetchall()
assert len(rows) == count
assert cursor.rowcount == -1
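# Editor's sketch: since rowcount is -1 after a SELECT here, code that needs the count should
# either count the fetched rows or ask the server directly. Hypothetical helper; the table
# name is interpolated, so it must come from trusted code, not user input.
def select_count(cursor, table):
    """Return the number of rows in `table` via COUNT(*) instead of Cursor.rowcount."""
    return cursor.execute(f"select count(*) from {table}").fetchone()[0]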
def test_rowcount_reset(cursor: pyodbc.Cursor):
"Ensure rowcount is reset after DDL"
cursor.execute("create table t1(i int)")
count = 4
for i in range(count):
cursor.execute("insert into t1 values (?)", i)
assert cursor.rowcount == 1
cursor.execute("create table t2(i int)")
ddl_rowcount = (0 if IS_FREEDTS else -1)
assert cursor.rowcount == ddl_rowcount
def test_retcursor_delete(cursor: pyodbc.Cursor):
cursor.execute("create table t1(i int)")
cursor.execute("insert into t1 values (1)")
v = cursor.execute("delete from t1")
assert v == cursor
def test_retcursor_nodata(cursor: pyodbc.Cursor):
"""
This represents a different code path than a delete that deleted something.
The return value is SQL_NO_DATA and code after it was causing an error. We could use
SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount
code.
"""
cursor.execute("create table t1(i int)")
# This is a different code path internally.
v = cursor.execute("delete from t1")
assert v == cursor
def test_retcursor_select(cursor: pyodbc.Cursor):
cursor.execute("create table t1(i int)")
cursor.execute("insert into t1 values (1)")
v = cursor.execute("select * from t1")
assert v == cursor
def table_with_spaces(cursor: pyodbc.Cursor):
"Ensure we can select using [x z] syntax"
try:
cursor.execute("create table [test one](int n)")
cursor.execute("insert into [test one] values(1)")
cursor.execute("select * from [test one]")
v = cursor.fetchone()[0]
assert v == 1
finally:
cursor.rollback()
def test_lower_case():
"Ensure pyodbc.lowercase forces returned column names to lowercase."
try:
pyodbc.lowercase = True
cnxn = connect()
cursor = cnxn.cursor()
cursor.execute("create table t1(Abc int, dEf int)")
cursor.execute("select * from t1")
names = [t[0] for t in cursor.description]
names.sort()
assert names == ["abc", "def"]
finally:
# Put it back so other tests don't fail.
pyodbc.lowercase = False
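# Editor's sketch: pyodbc.lowercase is a process-wide switch, so an alternative is to normalize
# names per result set from cursor.description. Hypothetical helper:
def lowercase_columns(cursor):
    """Return the column names of the current result set, lowercased locally."""
    return [column[0].lower() for column in cursor.description]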
def test_row_description(cursor: pyodbc.Cursor):
"""
Ensure Cursor.description is accessible as Row.cursor_description.
"""
cursor.execute("create table t1(a int, b char(3))")
cursor.execute("insert into t1 values(1, 'abc')")
row = cursor.execute("select * from t1").fetchone()
assert cursor.description == row.cursor_description
def test_temp_select(cursor: pyodbc.Cursor):
# A project was failing to create temporary tables via select into.
cursor.execute("create table t1(s char(7))")
cursor.execute("insert into t1 values(?)", "testing")
v = cursor.execute("select * from t1").fetchone()[0]
assert isinstance(v, str)
assert v == "testing"
cursor.execute("select s into t2 from t1")
v = cursor.execute("select * from t1").fetchone()[0]
assert isinstance(v, str)
assert v == "testing"
def test_executemany(cursor: pyodbc.Cursor):
cursor.execute("create table t1(a int, b varchar(10))")
params = [(i, str(i)) for i in range(1, 6)]
cursor.executemany("insert into t1(a, b) values (?,?)", params)
count = cursor.execute("select count(*) from t1").fetchone()[0]
assert count == len(params)
cursor.execute("select a, b from t1 order by a")
rows = cursor.fetchall()
assert count == len(rows)
for param, row in zip(params, rows):
assert param[0] == row[0]
assert param[1] == row[1]
def test_executemany_one(cursor: pyodbc.Cursor):
"Pass executemany a single sequence"
cursor.execute("create table t1(a int, b varchar(10))")
params = [(1, "test")]
cursor.executemany("insert into t1(a, b) values (?,?)", params)
count = cursor.execute("select count(*) from t1").fetchone()[0]
assert count == len(params)
cursor.execute("select a, b from t1 order by a")
rows = cursor.fetchall()
assert count == len(rows)
for param, row in zip(params, rows):
assert param[0] == row[0]
assert param[1] == row[1]
def test_executemany_dae_0(cursor: pyodbc.Cursor):
"""
DAE for 0-length value
"""
cursor.execute("create table t1(a nvarchar(max))")
cursor.fast_executemany = True
cursor.executemany("insert into t1(a) values(?)", [['']])
assert cursor.execute("select a from t1").fetchone()[0] == ''
cursor.fast_executemany = False
def test_executemany_failure(cursor: pyodbc.Cursor):
"""
Ensure that an exception is raised if one query in an executemany fails.
"""
cursor.execute("create table t1(a int, b varchar(10))")
params = [(1, 'good'),
('error', 'not an int'),
(3, 'good')]
with pytest.raises(pyodbc.Error):
cursor.executemany("insert into t1(a, b) value (?, ?)", params)
def test_row_slicing(cursor: pyodbc.Cursor):
cursor.execute("create table t1(a int, b int, c int, d int)")
cursor.execute("insert into t1 values(1,2,3,4)")
row = cursor.execute("select * from t1").fetchone()
result = row[:]
assert result is row
result = row[:-1]
assert result == (1, 2, 3)
result = row[0:4]
assert result is row
def test_row_repr(cursor: pyodbc.Cursor):
cursor.execute("create table t1(a int, b int, c int, d varchar(50))")
cursor.execute("insert into t1 values(1,2,3,'four')")
row = cursor.execute("select * from t1").fetchone()
result = str(row)
assert result == "(1, 2, 3, 'four')"
result = str(row[:-1])
assert result == "(1, 2, 3)"
result = str(row[:1])
assert result == "(1,)"
def test_concatenation(cursor: pyodbc.Cursor):
v2 = '0123456789' * 30
v3 = '9876543210' * 30
cursor.execute("create table t1(c1 int identity(1, 1), c2 varchar(300), c3 varchar(300))")
cursor.execute("insert into t1(c2, c3) values (?,?)", v2, v3)
row = cursor.execute("select c2, c3, c2 + c3 as both from t1").fetchone()
assert row.both == v2 + v3
def test_view_select(cursor: pyodbc.Cursor):
# Reported in forum: Can't select from a view? I think I do this a lot, but another test
# never hurts.
# Create a table (t1) with 3 rows and a view (t2) into it.
cursor.execute("create table t1(c1 int identity(1, 1), c2 varchar(50))")
for i in range(3):
cursor.execute("insert into t1(c2) values (?)", f"string{i}")
cursor.execute("create view t2 as select * from t1")
# Select from the view
cursor.execute("select * from t2")
rows = cursor.fetchall()
assert rows is not None
assert len(rows) == 3
def test_autocommit():
cnxn = connect()
assert cnxn.autocommit is False
cnxn = None
cnxn = connect(autocommit=True)
assert cnxn.autocommit is True
cnxn.autocommit = False
assert cnxn.autocommit is False
def test_sqlserver_callproc(cursor: pyodbc.Cursor):
try:
cursor.execute("drop procedure pyodbctest")
cursor.commit()
except:
pass
cursor.execute("create table t1(s varchar(10))")
cursor.execute("insert into t1 values(?)", "testing")
cursor.execute("""
create procedure pyodbctest @var1 varchar(32)
as
begin
select s from t1
return
end
""")
cursor.execute("exec pyodbctest 'hi'")
def test_skip(cursor: pyodbc.Cursor):
    # Insert 1 through 4. Fetch 1, skip 2 and 3, then fetch 4.
cursor.execute("create table t1(id int)")
for i in range(1, 5):
cursor.execute("insert into t1 values(?)", i)
cursor.execute("select id from t1 order by id")
assert cursor.fetchone()[0] == 1
cursor.skip(2)
assert cursor.fetchone()[0] == 4
def test_timeout():
cnxn = connect()
assert cnxn.timeout == 0 # defaults to zero (off)
cnxn.timeout = 30
assert cnxn.timeout == 30
cnxn.timeout = 0
assert cnxn.timeout == 0
def test_sets_execute(cursor: pyodbc.Cursor):
# Only lists and tuples are allowed.
cursor.execute("create table t1 (word varchar (100))")
words = {'a', 'b', 'c'}
with pytest.raises(pyodbc.ProgrammingError):
cursor.execute("insert into t1 (word) values (?)", words)
with pytest.raises(pyodbc.ProgrammingError):
cursor.executemany("insert into t1 (word) values (?)", words)
def test_row_execute(cursor: pyodbc.Cursor):
"Ensure we can use a Row object as a parameter to execute"
cursor.execute("create table t1(n int, s varchar(10))")
cursor.execute("insert into t1 values (1, 'a')")
row = cursor.execute("select n, s from t1").fetchone()
assert row
cursor.execute("create table t2(n int, s varchar(10))")
cursor.execute("insert into t2 values (?, ?)", row)
def test_row_executemany(cursor: pyodbc.Cursor):
"Ensure we can use a Row object as a parameter to executemany"
cursor.execute("create table t1(n int, s varchar(10))")
for i in range(3):
cursor.execute("insert into t1 values (?, ?)", i, chr(ord('a') + i))
rows = cursor.execute("select n, s from t1").fetchall()
assert len(rows) != 0
cursor.execute("create table t2(n int, s varchar(10))")
cursor.executemany("insert into t2 values (?, ?)", rows)
def test_description(cursor: pyodbc.Cursor):
"Ensure cursor.description is correct"
cursor.execute("create table t1(n int, s varchar(8), d decimal(5,2))")
cursor.execute("insert into t1 values (1, 'abc', '1.23')")
cursor.execute("select * from t1")
    # (I'm not sure the precision of an int is constant across different versions and bitnesses,
    # so I'm only hand-checking the items I do know.)
# int
t = cursor.description[0]
assert t[0] == 'n'
assert t[1] == int
assert t[5] == 0 # scale
assert t[6] is True # nullable
# varchar(8)
t = cursor.description[1]
assert t[0] == 's'
assert t[1] == str
assert t[4] == 8 # precision
assert t[5] == 0 # scale
assert t[6] is True # nullable
# decimal(5, 2)
t = cursor.description[2]
assert t[0] == 'd'
assert t[1] == Decimal
assert t[4] == 5 # precision
assert t[5] == 2 # scale
assert t[6] is True # nullable
def test_cursor_messages_with_print(cursor: pyodbc.Cursor):
"""
Ensure the Cursor.messages attribute is handled correctly with a simple PRINT statement.
"""
assert not cursor.messages
# SQL Server PRINT statements are never more than 8000 characters
# https://docs.microsoft.com/en-us/sql/t-sql/language-elements/print-transact-sql#remarks
for msg in ('hello world', 'ABCDEFGHIJ' * 800):
cursor.execute(f"PRINT '{msg}'")
messages = cursor.messages
assert isinstance(messages, list)
assert len(messages) == 1
assert isinstance(messages[0], tuple)
assert len(messages[0]) == 2
assert isinstance(messages[0][0], str)
assert isinstance(messages[0][1], str)
assert '[01000] (0)' == messages[0][0]
assert messages[0][1].endswith(msg)
def test_cursor_messages_with_stored_proc(cursor: pyodbc.Cursor):
"""
Complex scenario to test the Cursor.messages attribute.
"""
cursor.execute("""
create or alter procedure test_cursor_messages as
begin
set nocount on;
print 'Message 1a';
print 'Message 1b';
select N'Field 1a' AS F UNION ALL SELECT N'Field 1b';
select N'Field 2a' AS F UNION ALL SELECT N'Field 2b';
print 'Message 2a';
print 'Message 2b';
end
""")
# The messages will look like:
#
# [Microsoft][ODBC Driver 18 for SQL Server][SQL Server]Message 1a
# result set 1: messages, rows
cursor.execute("exec test_cursor_messages")
vals = [row[0] for row in cursor.fetchall()]
assert vals == ['Field 1a', 'Field 1b']
msgs = [
re.search(r'Message \d[ab]$', m[1]).group(0)
for m in cursor.messages
]
assert msgs == ['Message 1a', 'Message 1b']
# result set 2: rows, no messages
assert cursor.nextset()
vals = [row[0] for row in cursor.fetchall()]
assert vals == ['Field 2a', 'Field 2b']
assert not cursor.messages
# result set 3: messages, no rows
assert cursor.nextset()
with pytest.raises(pyodbc.ProgrammingError):
cursor.fetchall()
msgs = [
re.search(r'Message \d[ab]$', m[1]).group(0)
for m in cursor.messages
]
assert msgs == ['Message 2a', 'Message 2b']
# result set 4: no rows, no messages
assert not cursor.nextset()
with pytest.raises(pyodbc.ProgrammingError):
cursor.fetchall()
assert not cursor.messages
def test_none_param(cursor: pyodbc.Cursor):
"Ensure None can be used for params other than the first"
# Some driver/db versions would fail if NULL was not the first parameter because
# SQLDescribeParam (only used with NULL) could not be used after the first call to
# SQLBindParameter. This means None always worked for the first column, but did not work
# for later columns.
#
# If SQLDescribeParam doesn't work, pyodbc would use VARCHAR which almost always worked.
# However, binary/varbinary won't allow an implicit conversion.
cursor.execute("create table t1(n int, blob varbinary(max))")
cursor.execute("insert into t1 values (1, newid())")
row = cursor.execute("select * from t1").fetchone()
assert row.n == 1
assert isinstance(row.blob, bytes)
sql = "update t1 set n=?, blob=?"
try:
cursor.execute(sql, 2, None)
except pyodbc.DataError:
if IS_FREEDTS:
# cnxn.getinfo(pyodbc.SQL_DESCRIBE_PARAMETER) returns False for FreeTDS, so pyodbc
# can't call SQLDescribeParam to get the correct parameter type. This can lead to
# errors being returned from SQL Server when sp_prepexec is called, e.g., "Implicit
# conversion from data type varchar to varbinary(max) is not allowed."
#
# So at least verify that the user can manually specify the parameter type
cursor.setinputsizes([(), (pyodbc.SQL_VARBINARY, None, None)])
cursor.execute(sql, 2, None)
else:
raise
row = cursor.execute("select * from t1").fetchone()
assert row.n == 2
assert row.blob is None
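# Editor's sketch of the same workaround in general form: when SQLDescribeParam is unavailable
# (as with FreeTDS above), the parameter types can be declared up front with setinputsizes().
# Hypothetical usage; each tuple is (sql_type, column_size, decimal_digits), and an empty tuple
# leaves that parameter to the default handling:
#
#     cursor.setinputsizes([(), (pyodbc.SQL_VARBINARY, None, None)])
#     cursor.execute("update t1 set n=?, blob=?", 2, None)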
def test_output_conversion():
def convert1(value):
# The value is the raw bytes (as a bytes object) read from the
# database. We'll simply add an X at the beginning at the end.
return 'X' + value.decode('latin1') + 'X'
def convert2(value):
# Same as above, but add a Y at the beginning at the end.
return 'Y' + value.decode('latin1') + 'Y'
cnxn = connect()
cursor = cnxn.cursor()
cursor.execute("create table t1(n int, v varchar(10))")
cursor.execute("insert into t1 values (1, '123.45')")
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)
value = cursor.execute("select v from t1").fetchone()[0]
assert value == 'X123.45X'
# Clear all conversions and try again. There should be no Xs this time.
cnxn.clear_output_converters()
value = cursor.execute("select v from t1").fetchone()[0]
assert value == '123.45'
# Same but clear using remove_output_converter.
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)
value = cursor.execute("select v from t1").fetchone()[0]
assert value == 'X123.45X'
cnxn.remove_output_converter(pyodbc.SQL_VARCHAR)
value = cursor.execute("select v from t1").fetchone()[0]
assert value == '123.45'
# Clear via add_output_converter, passing None for the converter function.
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)
value = cursor.execute("select v from t1").fetchone()[0]
assert value == 'X123.45X'
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, None)
value = cursor.execute("select v from t1").fetchone()[0]
assert value == '123.45'
# retrieve and temporarily replace converter (get_output_converter)
#
# case_1: converter already registered
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)
value = cursor.execute("select v from t1").fetchone()[0]
assert value == 'X123.45X'
prev_converter = cnxn.get_output_converter(pyodbc.SQL_VARCHAR)
assert prev_converter is not None
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert2)
value = cursor.execute("select v from t1").fetchone()[0]
assert value == 'Y123.45Y'
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, prev_converter)
value = cursor.execute("select v from t1").fetchone()[0]
assert value == 'X123.45X'
#
# case_2: no converter already registered
cnxn.clear_output_converters()
value = cursor.execute("select v from t1").fetchone()[0]
assert value == '123.45'
prev_converter = cnxn.get_output_converter(pyodbc.SQL_VARCHAR)
assert prev_converter is None
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert2)
value = cursor.execute("select v from t1").fetchone()[0]
assert value == 'Y123.45Y'
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, prev_converter)
value = cursor.execute("select v from t1").fetchone()[0]
assert value == '123.45'
def test_too_large(cursor: pyodbc.Cursor):
"""Ensure error raised if insert fails due to truncation"""
value = 'x' * 1000
cursor.execute("create table t1(s varchar(800))")
with pytest.raises(pyodbc.Error):
cursor.execute("insert into t1 values (?)", value)
def test_row_equal(cursor: pyodbc.Cursor):
cursor.execute("create table t1(n int, s varchar(20))")
cursor.execute("insert into t1 values (1, 'test')")
row1 = cursor.execute("select n, s from t1").fetchone()
row2 = cursor.execute("select n, s from t1").fetchone()
assert row1 == row2
def test_row_gtlt(cursor: pyodbc.Cursor):
cursor.execute("create table t1(n int, s varchar(20))")
cursor.execute("insert into t1 values (1, 'test1')")
cursor.execute("insert into t1 values (1, 'test2')")
rows = cursor.execute("select n, s from t1 order by s").fetchall()
assert rows[0] < rows[1]
assert rows[0] <= rows[1]
assert rows[1] > rows[0]
assert rows[1] >= rows[0]
assert rows[0] != rows[1]
rows = list(rows)
rows.sort() # uses <
def test_context_manager_success():
"Ensure `with` commits if an exception is not raised"
cnxn = connect()
cursor = cnxn.cursor()
cursor.execute("create table t1(n int)")
cnxn.commit()
with cnxn:
cursor.execute("insert into t1 values (1)")
rows = cursor.execute("select n from t1").fetchall()
assert len(rows) == 1
assert rows[0][0] == 1
def test_context_manager_failure(cursor: pyodbc.Cursor):
"Ensure `with` rolls back if an exception is raised"
cnxn = connect()
cursor = cnxn.cursor()
# We'll insert a row and commit it. Then we'll insert another row followed by an
# exception.
cursor.execute("create table t1(n int)")
cursor.execute("insert into t1 values (1)")
cnxn.commit()
with pytest.raises(pyodbc.Error):
with cnxn:
cursor.execute("insert into t1 values (2)")
cursor.execute("delete from bogus")
cursor.execute("select max(n) from t1")
val = cursor.fetchval()
assert val == 1
def test_untyped_none(cursor: pyodbc.Cursor):
# From issue 129
value = cursor.execute("select ?", None).fetchone()[0]
assert value is None
def test_large_update_nodata(cursor: pyodbc.Cursor):
cursor.execute('create table t1(a varbinary(max))')
hundredkb = b'x' * 100 * 1024
cursor.execute('update t1 set a=? where 1=0', (hundredkb,))
def test_func_param(cursor: pyodbc.Cursor):
try:
cursor.execute("drop function func1")
except:
pass
cursor.execute("""
create function func1 (@testparam varchar(4))
returns @rettest table (param varchar(4))
as
begin
insert @rettest
select @testparam
return
end
""")
cursor.commit()
value = cursor.execute("select * from func1(?)", 'test').fetchone()[0]
assert value == 'test'
def test_columns(cursor: pyodbc.Cursor):
# When using aiohttp, `await cursor.primaryKeys('t1')` was raising the error
#
# Error: TypeError: argument 2 must be str, not None
#
# I'm not sure why, but PyArg_ParseTupleAndKeywords fails if you use "|s" for an
# optional string keyword when calling indirectly.
cursor.execute("create table t1(a int, b varchar(3), xΏz varchar(4))")
cursor.columns('t1')
results = {row.column_name: row for row in cursor}
row = results['a']
assert row.type_name == 'int', row.type_name
row = results['b']
assert row.type_name == 'varchar'
assert row.column_size == 3
    # Now do the same, but specifically pass in None to one of the keywords. Old versions
    # were parsing arguments incorrectly and would raise an error. (This crops up when
    # calling indirectly like columns(*args, **kwargs), which aioodbc does.)
cursor.columns('t1', schema=None, catalog=None)
results = {row.column_name: row for row in cursor}
row = results['a']
assert row.type_name == 'int', row.type_name
row = results['b']
assert row.type_name == 'varchar'
assert row.column_size == 3
row = results['xΏz']
assert row.type_name == 'varchar'
assert row.column_size == 4, row.column_size
for i in range(8, 16):
table_name = 'pyodbc_89abcdef'[:i]
cursor.execute(f"""
IF OBJECT_ID (N'{table_name}', N'U') IS NOT NULL DROP TABLE {table_name};
CREATE TABLE {table_name} (id INT PRIMARY KEY);
""")
col_count = len([col.column_name for col in cursor.columns(table_name)])
assert col_count == 1
cursor.execute(f"drop table {table_name}")
def test_cancel(cursor: pyodbc.Cursor):
    # I'm not sure how to reliably cause a hang to cancel, so for now we'll settle for
    # making sure SQLCancel is called correctly.
cursor.execute("select 1")
cursor.cancel()
def test_emoticons_as_parameter(cursor: pyodbc.Cursor):
# https://github.com/mkleehammer/pyodbc/issues/423
#
# When sending a varchar parameter, pyodbc is supposed to set ColumnSize to the number
# of characters. Ensure it works even with 4-byte characters.
#
# http://www.fileformat.info/info/unicode/char/1f31c/index.htm
v = "x \U0001F31C z"
cursor.execute("create table t1(s nvarchar(100))")
cursor.execute("insert into t1 values (?)", v)
result = cursor.execute("select s from t1").fetchone()[0]
assert result == v
def test_emoticons_as_literal(cursor: pyodbc.Cursor):
# similar to `test_emoticons_as_parameter`, above, except for Unicode literal
#
# http://www.fileformat.info/info/unicode/char/1f31c/index.htm
# FreeTDS ODBC issue fixed in version 1.1.23
# https://github.com/FreeTDS/freetds/issues/317
v = "x \U0001F31C z"
cursor.execute("create table t1(s nvarchar(100))")
cursor.execute(f"insert into t1 values (N'{v}')")
result = cursor.execute("select s from t1").fetchone()[0]
assert result == v
def _test_tvp(cursor: pyodbc.Cursor, diff_schema):
# Test table value parameters (TVP). I like the explanation here:
#
# https://www.mssqltips.com/sqlservertip/1483/using-table-valued-parameters-tvp-in-sql-server/
#
# "At a high level the TVP allows you to populate a table declared as a T-SQL variable,
# then pass that table as a parameter to a stored procedure or function."
#
# "The TVP must be declared READONLY. You cannot perform any DML (i.e. INSERT, UPDATE,
# DELETE) against the TVP; you can only reference it in a SELECT statement."
#
# In this test we'll create a table, pass it to a stored procedure, and have the stored
# procedure simply return the rows from the TVP.
#
# Apparently the way pyodbc knows something is a TVP is because it is in a sequence. I'm
# not sure I like that as it is very generic and specific to SQL Server. It would be wiser
# to define a wrapper pyodbc.TVP or pyodbc.Table object, similar to the DB APIs `Binary`
# object.
pyodbc.native_uuid = True
# This is the default, but we'll reset it in case a previous test fails to.
procname = 'SelectTVP'
typename = 'TestTVP'
if diff_schema:
schemaname = 'myschema'
procname = schemaname + '.' + procname
typenameonly = typename
typename = schemaname + '.' + typename
# (Don't use "if exists" since older SQL Servers don't support it.)
try:
cursor.execute("drop procedure " + procname)
except:
pass
try:
cursor.execute("drop type " + typename)
except:
pass
if diff_schema:
try:
cursor.execute("drop schema " + schemaname)
except:
pass
cursor.commit()
if diff_schema:
cursor.execute("CREATE SCHEMA myschema")
cursor.commit()
cursor.execute(
f"""
CREATE TYPE {typename} AS TABLE(
c01 VARCHAR(255),
c02 VARCHAR(MAX),
c03 VARBINARY(255),
c04 VARBINARY(MAX),
c05 BIT,
c06 DATE,
c07 TIME,
c08 DATETIME2(5),
c09 BIGINT,
c10 FLOAT,
c11 NUMERIC(38, 24),
c12 UNIQUEIDENTIFIER)
""")
cursor.commit()
cursor.execute(
f"""
CREATE PROCEDURE {procname} @TVP {typename} READONLY
AS SELECT * FROM @TVP;
""")
cursor.commit()
# The values aren't exactly VERY_LONG_LEN but close enough and *significantly* faster than
# the loop we had before.
VERY_LONG_LEN = 2000000
long_string = ''.join(chr(i) for i in range(32, 127)) # printable characters
long_bytearray = bytes(list(range(255)))
very_long_string = long_string * (VERY_LONG_LEN // len(long_string))
very_long_bytearray = long_bytearray * (VERY_LONG_LEN // len(long_bytearray))
params = [
# Three rows with all of the types in the table defined above.
(
'abc', 'abc',
bytes([0xD1, 0xCE, 0xFA, 0xCE]),
bytes([0x0F, 0xF1, 0xCE, 0xCA, 0xFE]), True,
date(1997, 8, 29), time(9, 13, 39),
datetime(2018, 11, 13, 13, 33, 26, 298420),
1234567, 3.14, Decimal('31234567890123.141243449787580175325274'),
uuid.UUID('4fe34a93-e574-04cc-200a-353f0d1770b1'),
),
(
'', '',
bytes([0x00, 0x01, 0x02, 0x03, 0x04]),
bytes([0x00, 0x01, 0x02, 0x03, 0x04, 0x05]), False,
date(1, 1, 1), time(0, 0, 0),
datetime(1, 1, 1, 0, 0, 0, 0),
-9223372036854775808, -1.79E+308, Decimal('0.000000000000000000000001'),
uuid.UUID('33f7504c-2bac-1b83-01d1-7434a7ba6a17'),
),
(
long_string, very_long_string,
bytes(long_bytearray), bytes(very_long_bytearray), True,
date(9999, 12, 31), time(23, 59, 59),
datetime(9999, 12, 31, 23, 59, 59, 999990),
9223372036854775807, 1.79E+308, Decimal('99999999999999.999999999999999999999999'),
uuid.UUID('ffffffff-ffff-ffff-ffff-ffffffffffff'),
)
]
if diff_schema:
p1 = [[typenameonly, schemaname] + params]
else:
p1 = [params]
result_array = [tuple(row) for row in cursor.execute(f"exec {procname} ?", p1).fetchall()]
# The values make it very difficult to troubleshoot if something is wrong, so instead of
# asserting they are the same, we'll walk them if there is a problem to identify which is
# wrong.
for row, param in zip(result_array, params):
if row != param:
for r, p in zip(row, param):
assert r == p
# Now test with zero rows.
params = []
p1 = [params]
if diff_schema:
p1 = [[typenameonly, schemaname] + params]
else:
p1 = [params]
result_array = cursor.execute(f"exec {procname} ?", p1).fetchall()
assert result_array == params
@pytest.mark.skipif(IS_FREEDTS, reason='FreeTDS does not support TVP')
def test_tvp(cursor: pyodbc.Cursor):
_test_tvp(cursor, False)
@pytest.mark.skipif(IS_FREEDTS, reason='FreeTDS does not support TVP')
def test_tvp_diffschema(cursor: pyodbc.Cursor):
_test_tvp(cursor, True)
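# Editor's sketch: the essential TVP call shape from _test_tvp, reduced to its minimum. The
# whole table is passed as a single parameter -- a list whose one element is the list of row
# tuples. The procedure name follows SelectTVP created above; the helper itself is hypothetical.
def exec_select_tvp(cursor, rows):
    """Pass `rows` (a list of tuples matching the TVP's columns) to the SelectTVP procedure."""
    return cursor.execute("exec SelectTVP ?", [rows]).fetchall()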
def get_sqlserver_version(cursor: pyodbc.Cursor):
"""
Returns the major version: 8-->2000, 9-->2005, 10-->2008
"""
cursor.execute("exec master..xp_msver 'ProductVersion'")
row = cursor.fetchone()
return int(row.Character_Value.split('.', 1)[0])
@lru_cache()
def _generate_str(length, encoding=None):
"""
Returns either a string or bytes, depending on whether encoding is provided,
that is `length` elements long.
If length is None, None is returned. This simplifies the tests by letting us put None into
an array of other lengths and pass them here, moving the special case check into one place.
"""
if length is None:
return None
# Put non-ASCII characters at the front so we don't end up chopping one in half in a
# multi-byte encoding like UTF-8.
v = 'á'
remaining = max(0, length - len(v))
if remaining:
seed = '0123456789-abcdefghijklmnopqrstuvwxyz-'
if remaining <= len(seed):
v += seed
else:
            c = (remaining + len(seed) - 1) // len(seed)  # ceiling division of the remainder
v += seed * c
if encoding:
v = v.encode(encoding)
# We chop *after* encoding because if we are encoding then we want bytes.
v = v[:length]
return v
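# Editor's note: a couple of hypothetical doctest-style checks for the helper above.
# assert len(_generate_str(10)) == 10                   # str result
# assert len(_generate_str(10, encoding='utf8')) == 10  # bytes result, chopped after encoding
# assert _generate_str(None) is None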
|
flexible
|
{
"blob_id": "51358ac7d4fc093f8291cfd9f098e3ac3db86cce",
"index": 8282,
"step-1": "<mask token>\n\n\ndef connect(autocommit=False, attrs_before=None):\n return pyodbc.connect(CNXNSTR, autocommit=autocommit, attrs_before=\n attrs_before)\n\n\n<mask token>\n\n\ndef test_nvarchar(cursor: pyodbc.Cursor):\n _test_vartype(cursor, 'nvarchar')\n\n\ndef test_varbinary(cursor: pyodbc.Cursor):\n _test_vartype(cursor, 'varbinary')\n\n\n<mask token>\n\n\ndef test_int(cursor: pyodbc.Cursor):\n _test_scalar(cursor, 'int', [None, -1, 0, 1, 12345678])\n\n\ndef test_bigint(cursor: pyodbc.Cursor):\n _test_scalar(cursor, 'bigint', [None, -1, 0, 1, 4886718345, 2147483647,\n 4294967295, 4886718345])\n\n\ndef test_overflow_int(cursor: pyodbc.Cursor):\n input = 9999999999999999999999999999999999999\n cursor.execute('create table t1(d bigint)')\n with pytest.raises(OverflowError):\n cursor.execute('insert into t1 values (?)', input)\n result = cursor.execute('select * from t1').fetchall()\n assert result == []\n\n\n<mask token>\n\n\ndef test_drivers():\n p = pyodbc.drivers()\n assert isinstance(p, list)\n\n\ndef test_datasources():\n p = pyodbc.dataSources()\n assert isinstance(p, dict)\n\n\n<mask token>\n\n\ndef test_getinfo_bool():\n cnxn = connect()\n value = cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES)\n assert isinstance(value, bool)\n\n\ndef test_getinfo_int():\n cnxn = connect()\n value = cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION)\n assert isinstance(value, int)\n\n\ndef test_getinfo_smallint():\n cnxn = connect()\n value = cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR)\n assert isinstance(value, int)\n\n\ndef test_no_fetch(cursor: pyodbc.Cursor):\n cursor.execute('select 1')\n cursor.execute('select 1')\n cursor.execute('select 1')\n\n\n<mask token>\n\n\ndef test_multiple_bindings(cursor: pyodbc.Cursor):\n \"\"\"More than one bind and select on a cursor\"\"\"\n cursor.execute('create table t1(n int)')\n cursor.execute('insert into t1 values (?)', 1)\n cursor.execute('insert into t1 values (?)', 2)\n cursor.execute('insert into t1 values (?)', 3)\n for _ in range(3):\n cursor.execute('select n from t1 where n < ?', 10)\n cursor.execute('select n from t1 where n < 3')\n\n\n<mask token>\n\n\ndef _test_vartype(cursor: pyodbc.Cursor, datatype):\n if datatype == 'text':\n lengths = LARGE_FENCEPOST_SIZES\n else:\n lengths = SMALL_FENCEPOST_SIZES\n if datatype == 'text':\n cursor.execute(f'create table t1(c1 {datatype})')\n else:\n maxlen = lengths[-1]\n cursor.execute(f'create table t1(c1 {datatype}({maxlen}))')\n for length in lengths:\n cursor.execute('delete from t1')\n encoding = datatype in ('blob', 'varbinary') and 'utf8' or None\n value = _generate_str(length, encoding=encoding)\n try:\n cursor.execute('insert into t1 values(?)', value)\n except pyodbc.Error as ex:\n msg = f'{datatype} insert failed: length={length} len={len(value)}'\n raise Exception(msg) from ex\n v = cursor.execute('select * from t1').fetchone()[0]\n assert v == value\n\n\n<mask token>\n\n\ndef test_noscan(cursor: pyodbc.Cursor):\n assert cursor.noscan is False\n cursor.noscan = True\n assert cursor.noscan is True\n\n\n<mask token>\n\n\ndef test_native_uuid(cursor: pyodbc.Cursor):\n value = uuid.uuid4()\n cursor.execute('create table t1(n uniqueidentifier)')\n cursor.execute('insert into t1 values (?)', value)\n pyodbc.native_uuid = True\n result = cursor.execute('select n from t1').fetchval()\n assert isinstance(result, uuid.UUID)\n assert value == result\n\n\n<mask token>\n\n\[email protected](IS_FREEDTS, reason=\n 'https://github.com/FreeTDS/freetds/issues/230')\ndef test_nextset_with_raiserror(cursor: 
pyodbc.Cursor):\n cursor.execute(\"select i = 1; RAISERROR('c', 16, 1);\")\n row = next(cursor)\n assert 1 == row.i\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.nextset()\n\n\n<mask token>\n\n\ndef test_bit(cursor: pyodbc.Cursor):\n value = True\n cursor.execute('create table t1(b bit)')\n cursor.execute('insert into t1 values (?)', value)\n v = cursor.execute('select b from t1').fetchone()[0]\n assert isinstance(v, bool)\n assert v == value\n\n\n<mask token>\n\n\ndef test_decimal_e(cursor: pyodbc.Cursor):\n \"\"\"Ensure exponential notation decimals are properly handled\"\"\"\n value = Decimal((0, (1, 2, 3), 5))\n cursor.execute('create table t1(d decimal(10, 2))')\n cursor.execute('insert into t1 values (?)', value)\n result = cursor.execute('select * from t1').fetchone()[0]\n assert result == value\n\n\n<mask token>\n\n\ndef test_empty_string(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(s varchar(20))')\n cursor.execute('insert into t1 values(?)', '')\n\n\n<mask token>\n\n\ndef test_negative_row_index(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(s varchar(20))')\n cursor.execute('insert into t1 values(?)', '1')\n row = cursor.execute('select * from t1').fetchone()\n assert row[0] == '1'\n assert row[-1] == '1'\n\n\ndef test_version():\n assert 3 == len(pyodbc.version.split('.'))\n\n\[email protected](IS_MSODBCSQL and SQLSERVER_YEAR < 2008, reason=\n 'Date not supported until 2008?')\ndef test_date(cursor: pyodbc.Cursor):\n value = date.today()\n cursor.execute('create table t1(d date)')\n cursor.execute('insert into t1 values (?)', value)\n result = cursor.execute('select d from t1').fetchone()[0]\n assert isinstance(result, date)\n assert value == result\n\n\n<mask token>\n\n\ndef test_datetime2(cursor: pyodbc.Cursor):\n value = datetime(2007, 1, 15, 3, 4, 5)\n cursor.execute('create table t1(dt datetime2)')\n cursor.execute('insert into t1 values (?)', value)\n result = cursor.execute('select dt from t1').fetchone()[0]\n assert isinstance(result, datetime)\n assert value == result\n\n\n<mask token>\n\n\ndef test_sp_results_from_temp(cursor: pyodbc.Cursor):\n cursor.execute(\n \"\"\"\n Create procedure proc1\n AS\n set nocount on\n select top 10 name, id, xtype, refdate\n into #tmptable\n from sysobjects\n\n select * from #tmptable\n \"\"\"\n )\n cursor.execute('exec proc1')\n assert cursor.description is not None\n assert len(cursor.description) == 4\n rows = cursor.fetchall()\n assert isinstance(rows, list)\n assert len(rows) == 10\n assert isinstance(rows[0].refdate, datetime)\n\n\n<mask token>\n\n\ndef test_sp_with_dates(cursor: pyodbc.Cursor):\n cursor.execute(\n \"\"\"\n if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]')\n and OBJECTPROPERTY(id, N'IsProcedure') = 1)\n drop procedure [dbo].[test_sp]\n \"\"\"\n )\n cursor.execute(\n \"\"\"\n create procedure test_sp(@d1 datetime, @d2 datetime)\n AS\n declare @d as int\n set @d = datediff(year, @d1, @d2)\n select @d\n \"\"\"\n )\n cursor.execute('exec test_sp ?, ?', datetime.now(), datetime.now())\n rows = cursor.fetchall()\n assert rows is not None\n assert rows[0][0] == 0\n\n\n<mask token>\n\n\ndef test_rowcount_select(cursor: pyodbc.Cursor):\n \"\"\"\n Ensure Cursor.rowcount is set properly after a select statement.\n\n pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount, but SQL Server 2005\n returns -1 after a select statement, so we'll test for that behavior. 
This is valid\n behavior according to the DB API specification, but people don't seem to like it.\n \"\"\"\n cursor.execute('create table t1(i int)')\n count = 4\n for i in range(count):\n cursor.execute('insert into t1 values (?)', i)\n cursor.execute('select * from t1')\n assert cursor.rowcount == -1\n rows = cursor.fetchall()\n assert len(rows) == count\n assert cursor.rowcount == -1\n\n\n<mask token>\n\n\ndef test_retcursor_delete(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(i int)')\n cursor.execute('insert into t1 values (1)')\n v = cursor.execute('delete from t1')\n assert v == cursor\n\n\ndef test_retcursor_nodata(cursor: pyodbc.Cursor):\n \"\"\"\n This represents a different code path than a delete that deleted something.\n\n The return value is SQL_NO_DATA and code after it was causing an error. We could use\n SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount\n code.\n \"\"\"\n cursor.execute('create table t1(i int)')\n v = cursor.execute('delete from t1')\n assert v == cursor\n\n\n<mask token>\n\n\ndef test_row_description(cursor: pyodbc.Cursor):\n \"\"\"\n Ensure Cursor.description is accessible as Row.cursor_description.\n \"\"\"\n cursor.execute('create table t1(a int, b char(3))')\n cursor.execute(\"insert into t1 values(1, 'abc')\")\n row = cursor.execute('select * from t1').fetchone()\n assert cursor.description == row.cursor_description\n\n\ndef test_temp_select(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(s char(7))')\n cursor.execute('insert into t1 values(?)', 'testing')\n v = cursor.execute('select * from t1').fetchone()[0]\n assert isinstance(v, str)\n assert v == 'testing'\n cursor.execute('select s into t2 from t1')\n v = cursor.execute('select * from t1').fetchone()[0]\n assert isinstance(v, str)\n assert v == 'testing'\n\n\n<mask token>\n\n\ndef test_row_slicing(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(a int, b int, c int, d int)')\n cursor.execute('insert into t1 values(1,2,3,4)')\n row = cursor.execute('select * from t1').fetchone()\n result = row[:]\n assert result is row\n result = row[:-1]\n assert result == (1, 2, 3)\n result = row[0:4]\n assert result is row\n\n\ndef test_row_repr(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(a int, b int, c int, d varchar(50))')\n cursor.execute(\"insert into t1 values(1,2,3,'four')\")\n row = cursor.execute('select * from t1').fetchone()\n result = str(row)\n assert result == \"(1, 2, 3, 'four')\"\n result = str(row[:-1])\n assert result == '(1, 2, 3)'\n result = str(row[:1])\n assert result == '(1,)'\n\n\ndef test_concatenation(cursor: pyodbc.Cursor):\n v2 = '0123456789' * 30\n v3 = '9876543210' * 30\n cursor.execute(\n 'create table t1(c1 int identity(1, 1), c2 varchar(300), c3 varchar(300))'\n )\n cursor.execute('insert into t1(c2, c3) values (?,?)', v2, v3)\n row = cursor.execute('select c2, c3, c2 + c3 as both from t1').fetchone()\n assert row.both == v2 + v3\n\n\n<mask token>\n\n\ndef test_sqlserver_callproc(cursor: pyodbc.Cursor):\n try:\n cursor.execute('drop procedure pyodbctest')\n cursor.commit()\n except:\n pass\n cursor.execute('create table t1(s varchar(10))')\n cursor.execute('insert into t1 values(?)', 'testing')\n cursor.execute(\n \"\"\"\n create procedure pyodbctest @var1 varchar(32)\n as\n begin\n select s from t1\n return\n end\n \"\"\"\n )\n cursor.execute(\"exec pyodbctest 'hi'\")\n\n\ndef test_skip(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(id int)')\n for i in range(1, 5):\n 
cursor.execute('insert into t1 values(?)', i)\n cursor.execute('select id from t1 order by id')\n assert cursor.fetchone()[0] == 1\n cursor.skip(2)\n assert cursor.fetchone()[0] == 4\n\n\n<mask token>\n\n\ndef test_sets_execute(cursor: pyodbc.Cursor):\n cursor.execute('create table t1 (word varchar (100))')\n words = {'a', 'b', 'c'}\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.execute('insert into t1 (word) values (?)', words)\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.executemany('insert into t1 (word) values (?)', words)\n\n\n<mask token>\n\n\ndef test_row_executemany(cursor: pyodbc.Cursor):\n \"\"\"Ensure we can use a Row object as a parameter to executemany\"\"\"\n cursor.execute('create table t1(n int, s varchar(10))')\n for i in range(3):\n cursor.execute('insert into t1 values (?, ?)', i, chr(ord('a') + i))\n rows = cursor.execute('select n, s from t1').fetchall()\n assert len(rows) != 0\n cursor.execute('create table t2(n int, s varchar(10))')\n cursor.executemany('insert into t2 values (?, ?)', rows)\n\n\ndef test_description(cursor: pyodbc.Cursor):\n \"\"\"Ensure cursor.description is correct\"\"\"\n cursor.execute('create table t1(n int, s varchar(8), d decimal(5,2))')\n cursor.execute(\"insert into t1 values (1, 'abc', '1.23')\")\n cursor.execute('select * from t1')\n t = cursor.description[0]\n assert t[0] == 'n'\n assert t[1] == int\n assert t[5] == 0\n assert t[6] is True\n t = cursor.description[1]\n assert t[0] == 's'\n assert t[1] == str\n assert t[4] == 8\n assert t[5] == 0\n assert t[6] is True\n t = cursor.description[2]\n assert t[0] == 'd'\n assert t[1] == Decimal\n assert t[4] == 5\n assert t[5] == 2\n assert t[6] is True\n\n\n<mask token>\n\n\ndef test_cursor_messages_with_stored_proc(cursor: pyodbc.Cursor):\n \"\"\"\n Complex scenario to test the Cursor.messages attribute.\n \"\"\"\n cursor.execute(\n \"\"\"\n create or alter procedure test_cursor_messages as\n begin\n set nocount on;\n print 'Message 1a';\n print 'Message 1b';\n select N'Field 1a' AS F UNION ALL SELECT N'Field 1b';\n select N'Field 2a' AS F UNION ALL SELECT N'Field 2b';\n print 'Message 2a';\n print 'Message 2b';\n end\n \"\"\"\n )\n cursor.execute('exec test_cursor_messages')\n vals = [row[0] for row in cursor.fetchall()]\n assert vals == ['Field 1a', 'Field 1b']\n msgs = [re.search('Message \\\\d[ab]$', m[1]).group(0) for m in cursor.\n messages]\n assert msgs == ['Message 1a', 'Message 1b']\n assert cursor.nextset()\n vals = [row[0] for row in cursor.fetchall()]\n assert vals == ['Field 2a', 'Field 2b']\n assert not cursor.messages\n assert cursor.nextset()\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.fetchall()\n msgs = [re.search('Message \\\\d[ab]$', m[1]).group(0) for m in cursor.\n messages]\n assert msgs == ['Message 2a', 'Message 2b']\n assert not cursor.nextset()\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.fetchall()\n assert not cursor.messages\n\n\n<mask token>\n\n\ndef test_context_manager_success():\n \"\"\"Ensure `with` commits if an exception is not raised\"\"\"\n cnxn = connect()\n cursor = cnxn.cursor()\n cursor.execute('create table t1(n int)')\n cnxn.commit()\n with cnxn:\n cursor.execute('insert into t1 values (1)')\n rows = cursor.execute('select n from t1').fetchall()\n assert len(rows) == 1\n assert rows[0][0] == 1\n\n\ndef test_context_manager_failure(cursor: pyodbc.Cursor):\n \"\"\"Ensure `with` rolls back if an exception is raised\"\"\"\n cnxn = connect()\n cursor = cnxn.cursor()\n cursor.execute('create table t1(n 
int)')\n cursor.execute('insert into t1 values (1)')\n cnxn.commit()\n with pytest.raises(pyodbc.Error):\n with cnxn:\n cursor.execute('insert into t1 values (2)')\n cursor.execute('delete from bogus')\n cursor.execute('select max(n) from t1')\n val = cursor.fetchval()\n assert val == 1\n\n\ndef test_untyped_none(cursor: pyodbc.Cursor):\n value = cursor.execute('select ?', None).fetchone()[0]\n assert value is None\n\n\n<mask token>\n\n\ndef test_columns(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(a int, b varchar(3), xΏz varchar(4))')\n cursor.columns('t1')\n results = {row.column_name: row for row in cursor}\n row = results['a']\n assert row.type_name == 'int', row.type_name\n row = results['b']\n assert row.type_name == 'varchar'\n assert row.column_size == 3\n cursor.columns('t1', schema=None, catalog=None)\n results = {row.column_name: row for row in cursor}\n row = results['a']\n assert row.type_name == 'int', row.type_name\n row = results['b']\n assert row.type_name == 'varchar'\n assert row.column_size == 3\n row = results['xΏz']\n assert row.type_name == 'varchar'\n assert row.column_size == 4, row.column_size\n for i in range(8, 16):\n table_name = 'pyodbc_89abcdef'[:i]\n cursor.execute(\n f\"\"\"\n IF OBJECT_ID (N'{table_name}', N'U') IS NOT NULL DROP TABLE {table_name};\n CREATE TABLE {table_name} (id INT PRIMARY KEY);\n \"\"\"\n )\n col_count = len([col.column_name for col in cursor.columns(table_name)]\n )\n assert col_count == 1\n cursor.execute(f'drop table {table_name}')\n\n\n<mask token>\n\n\ndef test_emoticons_as_parameter(cursor: pyodbc.Cursor):\n v = 'x 🌜 z'\n cursor.execute('create table t1(s nvarchar(100))')\n cursor.execute('insert into t1 values (?)', v)\n result = cursor.execute('select s from t1').fetchone()[0]\n assert result == v\n\n\n<mask token>\n\n\[email protected](IS_FREEDTS, reason='FreeTDS does not support TVP')\ndef test_tvp_diffschema(cursor: pyodbc.Cursor):\n _test_tvp(cursor, True)\n\n\n<mask token>\n\n\n@lru_cache()\ndef _generate_str(length, encoding=None):\n \"\"\"\n Returns either a string or bytes, depending on whether encoding is provided,\n that is `length` elements long.\n\n If length is None, None is returned. This simplifies the tests by letting us put None into\n an array of other lengths and pass them here, moving the special case check into one place.\n \"\"\"\n if length is None:\n return None\n v = 'á'\n remaining = max(0, length - len(v))\n if remaining:\n seed = '0123456789-abcdefghijklmnopqrstuvwxyz-'\n if remaining <= len(seed):\n v += seed\n else:\n c = remaining + len(seed) - 1 // len(seed)\n v += seed * c\n if encoding:\n v = v.encode(encoding)\n v = v[:length]\n return v\n",
"step-2": "<mask token>\n\n\ndef connect(autocommit=False, attrs_before=None):\n return pyodbc.connect(CNXNSTR, autocommit=autocommit, attrs_before=\n attrs_before)\n\n\n<mask token>\n\n\ndef test_nvarchar(cursor: pyodbc.Cursor):\n _test_vartype(cursor, 'nvarchar')\n\n\ndef test_varbinary(cursor: pyodbc.Cursor):\n _test_vartype(cursor, 'varbinary')\n\n\[email protected](SQLSERVER_YEAR < 2005, reason=\n '(max) not supported until 2005')\ndef test_unicode_longmax(cursor: pyodbc.Cursor):\n cursor.execute(\"select cast(replicate(N'x', 512) as nvarchar(max))\")\n\n\n<mask token>\n\n\ndef test_int(cursor: pyodbc.Cursor):\n _test_scalar(cursor, 'int', [None, -1, 0, 1, 12345678])\n\n\ndef test_bigint(cursor: pyodbc.Cursor):\n _test_scalar(cursor, 'bigint', [None, -1, 0, 1, 4886718345, 2147483647,\n 4294967295, 4886718345])\n\n\ndef test_overflow_int(cursor: pyodbc.Cursor):\n input = 9999999999999999999999999999999999999\n cursor.execute('create table t1(d bigint)')\n with pytest.raises(OverflowError):\n cursor.execute('insert into t1 values (?)', input)\n result = cursor.execute('select * from t1').fetchall()\n assert result == []\n\n\n<mask token>\n\n\ndef test_drivers():\n p = pyodbc.drivers()\n assert isinstance(p, list)\n\n\ndef test_datasources():\n p = pyodbc.dataSources()\n assert isinstance(p, dict)\n\n\n<mask token>\n\n\ndef test_getinfo_bool():\n cnxn = connect()\n value = cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES)\n assert isinstance(value, bool)\n\n\ndef test_getinfo_int():\n cnxn = connect()\n value = cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION)\n assert isinstance(value, int)\n\n\ndef test_getinfo_smallint():\n cnxn = connect()\n value = cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR)\n assert isinstance(value, int)\n\n\ndef test_no_fetch(cursor: pyodbc.Cursor):\n cursor.execute('select 1')\n cursor.execute('select 1')\n cursor.execute('select 1')\n\n\n<mask token>\n\n\ndef test_exc_integrity(cursor: pyodbc.Cursor):\n \"\"\"Make sure an IntegretyError is raised\"\"\"\n cursor.execute('create table t1(s1 varchar(10) primary key)')\n cursor.execute(\"insert into t1 values ('one')\")\n with pytest.raises(pyodbc.IntegrityError):\n cursor.execute(\"insert into t1 values ('one')\")\n\n\ndef test_multiple_bindings(cursor: pyodbc.Cursor):\n \"\"\"More than one bind and select on a cursor\"\"\"\n cursor.execute('create table t1(n int)')\n cursor.execute('insert into t1 values (?)', 1)\n cursor.execute('insert into t1 values (?)', 2)\n cursor.execute('insert into t1 values (?)', 3)\n for _ in range(3):\n cursor.execute('select n from t1 where n < ?', 10)\n cursor.execute('select n from t1 where n < 3')\n\n\n<mask token>\n\n\ndef _test_vartype(cursor: pyodbc.Cursor, datatype):\n if datatype == 'text':\n lengths = LARGE_FENCEPOST_SIZES\n else:\n lengths = SMALL_FENCEPOST_SIZES\n if datatype == 'text':\n cursor.execute(f'create table t1(c1 {datatype})')\n else:\n maxlen = lengths[-1]\n cursor.execute(f'create table t1(c1 {datatype}({maxlen}))')\n for length in lengths:\n cursor.execute('delete from t1')\n encoding = datatype in ('blob', 'varbinary') and 'utf8' or None\n value = _generate_str(length, encoding=encoding)\n try:\n cursor.execute('insert into t1 values(?)', value)\n except pyodbc.Error as ex:\n msg = f'{datatype} insert failed: length={length} len={len(value)}'\n raise Exception(msg) from ex\n v = cursor.execute('select * from t1').fetchone()[0]\n assert v == value\n\n\n<mask token>\n\n\ndef test_noscan(cursor: pyodbc.Cursor):\n assert cursor.noscan is False\n cursor.noscan = True\n 
assert cursor.noscan is True\n\n\n<mask token>\n\n\ndef test_native_uuid(cursor: pyodbc.Cursor):\n value = uuid.uuid4()\n cursor.execute('create table t1(n uniqueidentifier)')\n cursor.execute('insert into t1 values (?)', value)\n pyodbc.native_uuid = True\n result = cursor.execute('select n from t1').fetchval()\n assert isinstance(result, uuid.UUID)\n assert value == result\n\n\n<mask token>\n\n\[email protected](IS_FREEDTS, reason=\n 'https://github.com/FreeTDS/freetds/issues/230')\ndef test_nextset_with_raiserror(cursor: pyodbc.Cursor):\n cursor.execute(\"select i = 1; RAISERROR('c', 16, 1);\")\n row = next(cursor)\n assert 1 == row.i\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.nextset()\n\n\n<mask token>\n\n\ndef test_bit(cursor: pyodbc.Cursor):\n value = True\n cursor.execute('create table t1(b bit)')\n cursor.execute('insert into t1 values (?)', value)\n v = cursor.execute('select b from t1').fetchone()[0]\n assert isinstance(v, bool)\n assert v == value\n\n\ndef test_decimal(cursor: pyodbc.Cursor):\n for precision, scale, negative in [(1, 0, False), (1, 0, True), (6, 0, \n False), (6, 2, False), (6, 4, True), (6, 6, True), (38, 0, False),\n (38, 10, False), (38, 38, False), (38, 0, True), (38, 10, True), (\n 38, 38, True)]:\n try:\n cursor.execute('drop table t1')\n except:\n pass\n cursor.execute(f'create table t1(d decimal({precision}, {scale}))')\n sign = negative and '-' or ''\n before = '9' * (precision - scale)\n after = scale and '.' + '9' * scale or ''\n decStr = f'{sign}{before}{after}'\n value = Decimal(decStr)\n cursor.execute('insert into t1 values(?)', value)\n v = cursor.execute('select d from t1').fetchone()[0]\n assert v == value\n\n\ndef test_decimal_e(cursor: pyodbc.Cursor):\n \"\"\"Ensure exponential notation decimals are properly handled\"\"\"\n value = Decimal((0, (1, 2, 3), 5))\n cursor.execute('create table t1(d decimal(10, 2))')\n cursor.execute('insert into t1 values (?)', value)\n result = cursor.execute('select * from t1').fetchone()[0]\n assert result == value\n\n\n<mask token>\n\n\ndef test_empty_string(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(s varchar(20))')\n cursor.execute('insert into t1 values(?)', '')\n\n\n<mask token>\n\n\ndef test_negative_row_index(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(s varchar(20))')\n cursor.execute('insert into t1 values(?)', '1')\n row = cursor.execute('select * from t1').fetchone()\n assert row[0] == '1'\n assert row[-1] == '1'\n\n\ndef test_version():\n assert 3 == len(pyodbc.version.split('.'))\n\n\[email protected](IS_MSODBCSQL and SQLSERVER_YEAR < 2008, reason=\n 'Date not supported until 2008?')\ndef test_date(cursor: pyodbc.Cursor):\n value = date.today()\n cursor.execute('create table t1(d date)')\n cursor.execute('insert into t1 values (?)', value)\n result = cursor.execute('select d from t1').fetchone()[0]\n assert isinstance(result, date)\n assert value == result\n\n\n<mask token>\n\n\ndef test_datetime2(cursor: pyodbc.Cursor):\n value = datetime(2007, 1, 15, 3, 4, 5)\n cursor.execute('create table t1(dt datetime2)')\n cursor.execute('insert into t1 values (?)', value)\n result = cursor.execute('select dt from t1').fetchone()[0]\n assert isinstance(result, datetime)\n assert value == result\n\n\ndef test_sp_results(cursor: pyodbc.Cursor):\n cursor.execute(\n \"\"\"\n Create procedure proc1\n AS\n select top 10 name, id, xtype, refdate\n from sysobjects\n \"\"\"\n )\n rows = cursor.execute('exec proc1').fetchall()\n assert isinstance(rows, list)\n assert len(rows) 
== 10\n assert isinstance(rows[0].refdate, datetime)\n\n\ndef test_sp_results_from_temp(cursor: pyodbc.Cursor):\n cursor.execute(\n \"\"\"\n Create procedure proc1\n AS\n set nocount on\n select top 10 name, id, xtype, refdate\n into #tmptable\n from sysobjects\n\n select * from #tmptable\n \"\"\"\n )\n cursor.execute('exec proc1')\n assert cursor.description is not None\n assert len(cursor.description) == 4\n rows = cursor.fetchall()\n assert isinstance(rows, list)\n assert len(rows) == 10\n assert isinstance(rows[0].refdate, datetime)\n\n\n<mask token>\n\n\ndef test_sp_with_dates(cursor: pyodbc.Cursor):\n cursor.execute(\n \"\"\"\n if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]')\n and OBJECTPROPERTY(id, N'IsProcedure') = 1)\n drop procedure [dbo].[test_sp]\n \"\"\"\n )\n cursor.execute(\n \"\"\"\n create procedure test_sp(@d1 datetime, @d2 datetime)\n AS\n declare @d as int\n set @d = datediff(year, @d1, @d2)\n select @d\n \"\"\"\n )\n cursor.execute('exec test_sp ?, ?', datetime.now(), datetime.now())\n rows = cursor.fetchall()\n assert rows is not None\n assert rows[0][0] == 0\n\n\n<mask token>\n\n\ndef test_rowcount_select(cursor: pyodbc.Cursor):\n \"\"\"\n Ensure Cursor.rowcount is set properly after a select statement.\n\n pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount, but SQL Server 2005\n returns -1 after a select statement, so we'll test for that behavior. This is valid\n behavior according to the DB API specification, but people don't seem to like it.\n \"\"\"\n cursor.execute('create table t1(i int)')\n count = 4\n for i in range(count):\n cursor.execute('insert into t1 values (?)', i)\n cursor.execute('select * from t1')\n assert cursor.rowcount == -1\n rows = cursor.fetchall()\n assert len(rows) == count\n assert cursor.rowcount == -1\n\n\n<mask token>\n\n\ndef test_retcursor_delete(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(i int)')\n cursor.execute('insert into t1 values (1)')\n v = cursor.execute('delete from t1')\n assert v == cursor\n\n\ndef test_retcursor_nodata(cursor: pyodbc.Cursor):\n \"\"\"\n This represents a different code path than a delete that deleted something.\n\n The return value is SQL_NO_DATA and code after it was causing an error. 
We could use\n SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount\n code.\n \"\"\"\n cursor.execute('create table t1(i int)')\n v = cursor.execute('delete from t1')\n assert v == cursor\n\n\n<mask token>\n\n\ndef test_row_description(cursor: pyodbc.Cursor):\n \"\"\"\n Ensure Cursor.description is accessible as Row.cursor_description.\n \"\"\"\n cursor.execute('create table t1(a int, b char(3))')\n cursor.execute(\"insert into t1 values(1, 'abc')\")\n row = cursor.execute('select * from t1').fetchone()\n assert cursor.description == row.cursor_description\n\n\ndef test_temp_select(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(s char(7))')\n cursor.execute('insert into t1 values(?)', 'testing')\n v = cursor.execute('select * from t1').fetchone()[0]\n assert isinstance(v, str)\n assert v == 'testing'\n cursor.execute('select s into t2 from t1')\n v = cursor.execute('select * from t1').fetchone()[0]\n assert isinstance(v, str)\n assert v == 'testing'\n\n\n<mask token>\n\n\ndef test_executemany_dae_0(cursor: pyodbc.Cursor):\n \"\"\"\n DAE for 0-length value\n \"\"\"\n cursor.execute('create table t1(a nvarchar(max))')\n cursor.fast_executemany = True\n cursor.executemany('insert into t1(a) values(?)', [['']])\n assert cursor.execute('select a from t1').fetchone()[0] == ''\n cursor.fast_executemany = False\n\n\n<mask token>\n\n\ndef test_row_slicing(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(a int, b int, c int, d int)')\n cursor.execute('insert into t1 values(1,2,3,4)')\n row = cursor.execute('select * from t1').fetchone()\n result = row[:]\n assert result is row\n result = row[:-1]\n assert result == (1, 2, 3)\n result = row[0:4]\n assert result is row\n\n\ndef test_row_repr(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(a int, b int, c int, d varchar(50))')\n cursor.execute(\"insert into t1 values(1,2,3,'four')\")\n row = cursor.execute('select * from t1').fetchone()\n result = str(row)\n assert result == \"(1, 2, 3, 'four')\"\n result = str(row[:-1])\n assert result == '(1, 2, 3)'\n result = str(row[:1])\n assert result == '(1,)'\n\n\ndef test_concatenation(cursor: pyodbc.Cursor):\n v2 = '0123456789' * 30\n v3 = '9876543210' * 30\n cursor.execute(\n 'create table t1(c1 int identity(1, 1), c2 varchar(300), c3 varchar(300))'\n )\n cursor.execute('insert into t1(c2, c3) values (?,?)', v2, v3)\n row = cursor.execute('select c2, c3, c2 + c3 as both from t1').fetchone()\n assert row.both == v2 + v3\n\n\n<mask token>\n\n\ndef test_sqlserver_callproc(cursor: pyodbc.Cursor):\n try:\n cursor.execute('drop procedure pyodbctest')\n cursor.commit()\n except:\n pass\n cursor.execute('create table t1(s varchar(10))')\n cursor.execute('insert into t1 values(?)', 'testing')\n cursor.execute(\n \"\"\"\n create procedure pyodbctest @var1 varchar(32)\n as\n begin\n select s from t1\n return\n end\n \"\"\"\n )\n cursor.execute(\"exec pyodbctest 'hi'\")\n\n\ndef test_skip(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(id int)')\n for i in range(1, 5):\n cursor.execute('insert into t1 values(?)', i)\n cursor.execute('select id from t1 order by id')\n assert cursor.fetchone()[0] == 1\n cursor.skip(2)\n assert cursor.fetchone()[0] == 4\n\n\ndef test_timeout():\n cnxn = connect()\n assert cnxn.timeout == 0\n cnxn.timeout = 30\n assert cnxn.timeout == 30\n cnxn.timeout = 0\n assert cnxn.timeout == 0\n\n\ndef test_sets_execute(cursor: pyodbc.Cursor):\n cursor.execute('create table t1 (word varchar (100))')\n words = {'a', 'b', 'c'}\n 
with pytest.raises(pyodbc.ProgrammingError):\n cursor.execute('insert into t1 (word) values (?)', words)\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.executemany('insert into t1 (word) values (?)', words)\n\n\n<mask token>\n\n\ndef test_row_executemany(cursor: pyodbc.Cursor):\n \"\"\"Ensure we can use a Row object as a parameter to executemany\"\"\"\n cursor.execute('create table t1(n int, s varchar(10))')\n for i in range(3):\n cursor.execute('insert into t1 values (?, ?)', i, chr(ord('a') + i))\n rows = cursor.execute('select n, s from t1').fetchall()\n assert len(rows) != 0\n cursor.execute('create table t2(n int, s varchar(10))')\n cursor.executemany('insert into t2 values (?, ?)', rows)\n\n\ndef test_description(cursor: pyodbc.Cursor):\n \"\"\"Ensure cursor.description is correct\"\"\"\n cursor.execute('create table t1(n int, s varchar(8), d decimal(5,2))')\n cursor.execute(\"insert into t1 values (1, 'abc', '1.23')\")\n cursor.execute('select * from t1')\n t = cursor.description[0]\n assert t[0] == 'n'\n assert t[1] == int\n assert t[5] == 0\n assert t[6] is True\n t = cursor.description[1]\n assert t[0] == 's'\n assert t[1] == str\n assert t[4] == 8\n assert t[5] == 0\n assert t[6] is True\n t = cursor.description[2]\n assert t[0] == 'd'\n assert t[1] == Decimal\n assert t[4] == 5\n assert t[5] == 2\n assert t[6] is True\n\n\n<mask token>\n\n\ndef test_cursor_messages_with_stored_proc(cursor: pyodbc.Cursor):\n \"\"\"\n Complex scenario to test the Cursor.messages attribute.\n \"\"\"\n cursor.execute(\n \"\"\"\n create or alter procedure test_cursor_messages as\n begin\n set nocount on;\n print 'Message 1a';\n print 'Message 1b';\n select N'Field 1a' AS F UNION ALL SELECT N'Field 1b';\n select N'Field 2a' AS F UNION ALL SELECT N'Field 2b';\n print 'Message 2a';\n print 'Message 2b';\n end\n \"\"\"\n )\n cursor.execute('exec test_cursor_messages')\n vals = [row[0] for row in cursor.fetchall()]\n assert vals == ['Field 1a', 'Field 1b']\n msgs = [re.search('Message \\\\d[ab]$', m[1]).group(0) for m in cursor.\n messages]\n assert msgs == ['Message 1a', 'Message 1b']\n assert cursor.nextset()\n vals = [row[0] for row in cursor.fetchall()]\n assert vals == ['Field 2a', 'Field 2b']\n assert not cursor.messages\n assert cursor.nextset()\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.fetchall()\n msgs = [re.search('Message \\\\d[ab]$', m[1]).group(0) for m in cursor.\n messages]\n assert msgs == ['Message 2a', 'Message 2b']\n assert not cursor.nextset()\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.fetchall()\n assert not cursor.messages\n\n\ndef test_none_param(cursor: pyodbc.Cursor):\n \"\"\"Ensure None can be used for params other than the first\"\"\"\n cursor.execute('create table t1(n int, blob varbinary(max))')\n cursor.execute('insert into t1 values (1, newid())')\n row = cursor.execute('select * from t1').fetchone()\n assert row.n == 1\n assert isinstance(row.blob, bytes)\n sql = 'update t1 set n=?, blob=?'\n try:\n cursor.execute(sql, 2, None)\n except pyodbc.DataError:\n if IS_FREEDTS:\n cursor.setinputsizes([(), (pyodbc.SQL_VARBINARY, None, None)])\n cursor.execute(sql, 2, None)\n else:\n raise\n row = cursor.execute('select * from t1').fetchone()\n assert row.n == 2\n assert row.blob is None\n\n\ndef test_output_conversion():\n\n def convert1(value):\n return 'X' + value.decode('latin1') + 'X'\n\n def convert2(value):\n return 'Y' + value.decode('latin1') + 'Y'\n cnxn = connect()\n cursor = cnxn.cursor()\n cursor.execute('create table t1(n int, v 
varchar(10))')\n cursor.execute(\"insert into t1 values (1, '123.45')\")\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == 'X123.45X'\n cnxn.clear_output_converters()\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == '123.45'\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == 'X123.45X'\n cnxn.remove_output_converter(pyodbc.SQL_VARCHAR)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == '123.45'\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == 'X123.45X'\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, None)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == '123.45'\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == 'X123.45X'\n prev_converter = cnxn.get_output_converter(pyodbc.SQL_VARCHAR)\n assert prev_converter is not None\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert2)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == 'Y123.45Y'\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, prev_converter)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == 'X123.45X'\n cnxn.clear_output_converters()\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == '123.45'\n prev_converter = cnxn.get_output_converter(pyodbc.SQL_VARCHAR)\n assert prev_converter is None\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert2)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == 'Y123.45Y'\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, prev_converter)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == '123.45'\n\n\n<mask token>\n\n\ndef test_context_manager_success():\n \"\"\"Ensure `with` commits if an exception is not raised\"\"\"\n cnxn = connect()\n cursor = cnxn.cursor()\n cursor.execute('create table t1(n int)')\n cnxn.commit()\n with cnxn:\n cursor.execute('insert into t1 values (1)')\n rows = cursor.execute('select n from t1').fetchall()\n assert len(rows) == 1\n assert rows[0][0] == 1\n\n\ndef test_context_manager_failure(cursor: pyodbc.Cursor):\n \"\"\"Ensure `with` rolls back if an exception is raised\"\"\"\n cnxn = connect()\n cursor = cnxn.cursor()\n cursor.execute('create table t1(n int)')\n cursor.execute('insert into t1 values (1)')\n cnxn.commit()\n with pytest.raises(pyodbc.Error):\n with cnxn:\n cursor.execute('insert into t1 values (2)')\n cursor.execute('delete from bogus')\n cursor.execute('select max(n) from t1')\n val = cursor.fetchval()\n assert val == 1\n\n\ndef test_untyped_none(cursor: pyodbc.Cursor):\n value = cursor.execute('select ?', None).fetchone()[0]\n assert value is None\n\n\ndef test_large_update_nodata(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(a varbinary(max))')\n hundredkb = b'x' * 100 * 1024\n cursor.execute('update t1 set a=? 
where 1=0', (hundredkb,))\n\n\n<mask token>\n\n\ndef test_columns(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(a int, b varchar(3), xΏz varchar(4))')\n cursor.columns('t1')\n results = {row.column_name: row for row in cursor}\n row = results['a']\n assert row.type_name == 'int', row.type_name\n row = results['b']\n assert row.type_name == 'varchar'\n assert row.column_size == 3\n cursor.columns('t1', schema=None, catalog=None)\n results = {row.column_name: row for row in cursor}\n row = results['a']\n assert row.type_name == 'int', row.type_name\n row = results['b']\n assert row.type_name == 'varchar'\n assert row.column_size == 3\n row = results['xΏz']\n assert row.type_name == 'varchar'\n assert row.column_size == 4, row.column_size\n for i in range(8, 16):\n table_name = 'pyodbc_89abcdef'[:i]\n cursor.execute(\n f\"\"\"\n IF OBJECT_ID (N'{table_name}', N'U') IS NOT NULL DROP TABLE {table_name};\n CREATE TABLE {table_name} (id INT PRIMARY KEY);\n \"\"\"\n )\n col_count = len([col.column_name for col in cursor.columns(table_name)]\n )\n assert col_count == 1\n cursor.execute(f'drop table {table_name}')\n\n\n<mask token>\n\n\ndef test_emoticons_as_parameter(cursor: pyodbc.Cursor):\n v = 'x 🌜 z'\n cursor.execute('create table t1(s nvarchar(100))')\n cursor.execute('insert into t1 values (?)', v)\n result = cursor.execute('select s from t1').fetchone()[0]\n assert result == v\n\n\n<mask token>\n\n\[email protected](IS_FREEDTS, reason='FreeTDS does not support TVP')\ndef test_tvp_diffschema(cursor: pyodbc.Cursor):\n _test_tvp(cursor, True)\n\n\n<mask token>\n\n\n@lru_cache()\ndef _generate_str(length, encoding=None):\n \"\"\"\n Returns either a string or bytes, depending on whether encoding is provided,\n that is `length` elements long.\n\n If length is None, None is returned. This simplifies the tests by letting us put None into\n an array of other lengths and pass them here, moving the special case check into one place.\n \"\"\"\n if length is None:\n return None\n v = 'á'\n remaining = max(0, length - len(v))\n if remaining:\n seed = '0123456789-abcdefghijklmnopqrstuvwxyz-'\n if remaining <= len(seed):\n v += seed\n else:\n c = remaining + len(seed) - 1 // len(seed)\n v += seed * c\n if encoding:\n v = v.encode(encoding)\n v = v[:length]\n return v\n",
"step-3": "<mask token>\n\n\ndef connect(autocommit=False, attrs_before=None):\n return pyodbc.connect(CNXNSTR, autocommit=autocommit, attrs_before=\n attrs_before)\n\n\n<mask token>\n\n\ndef test_text(cursor: pyodbc.Cursor):\n _test_vartype(cursor, 'text')\n\n\n<mask token>\n\n\ndef test_nvarchar(cursor: pyodbc.Cursor):\n _test_vartype(cursor, 'nvarchar')\n\n\ndef test_varbinary(cursor: pyodbc.Cursor):\n _test_vartype(cursor, 'varbinary')\n\n\[email protected](SQLSERVER_YEAR < 2005, reason=\n '(max) not supported until 2005')\ndef test_unicode_longmax(cursor: pyodbc.Cursor):\n cursor.execute(\"select cast(replicate(N'x', 512) as nvarchar(max))\")\n\n\n<mask token>\n\n\ndef test_int(cursor: pyodbc.Cursor):\n _test_scalar(cursor, 'int', [None, -1, 0, 1, 12345678])\n\n\ndef test_bigint(cursor: pyodbc.Cursor):\n _test_scalar(cursor, 'bigint', [None, -1, 0, 1, 4886718345, 2147483647,\n 4294967295, 4886718345])\n\n\ndef test_overflow_int(cursor: pyodbc.Cursor):\n input = 9999999999999999999999999999999999999\n cursor.execute('create table t1(d bigint)')\n with pytest.raises(OverflowError):\n cursor.execute('insert into t1 values (?)', input)\n result = cursor.execute('select * from t1').fetchall()\n assert result == []\n\n\n<mask token>\n\n\ndef test_drivers():\n p = pyodbc.drivers()\n assert isinstance(p, list)\n\n\ndef test_datasources():\n p = pyodbc.dataSources()\n assert isinstance(p, dict)\n\n\n<mask token>\n\n\ndef test_getinfo_bool():\n cnxn = connect()\n value = cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES)\n assert isinstance(value, bool)\n\n\ndef test_getinfo_int():\n cnxn = connect()\n value = cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION)\n assert isinstance(value, int)\n\n\ndef test_getinfo_smallint():\n cnxn = connect()\n value = cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR)\n assert isinstance(value, int)\n\n\ndef test_no_fetch(cursor: pyodbc.Cursor):\n cursor.execute('select 1')\n cursor.execute('select 1')\n cursor.execute('select 1')\n\n\ndef test_decode_meta(cursor: pyodbc.Cursor):\n \"\"\"\n Ensure column names with non-ASCII characters are converted using the configured encodings.\n \"\"\"\n cursor.execute('create table t1(a int)')\n cursor.execute('insert into t1 values (1)')\n cursor.execute('select a as \"Tipología\" from t1')\n assert cursor.description[0][0] == 'Tipología'\n\n\ndef test_exc_integrity(cursor: pyodbc.Cursor):\n \"\"\"Make sure an IntegretyError is raised\"\"\"\n cursor.execute('create table t1(s1 varchar(10) primary key)')\n cursor.execute(\"insert into t1 values ('one')\")\n with pytest.raises(pyodbc.IntegrityError):\n cursor.execute(\"insert into t1 values ('one')\")\n\n\ndef test_multiple_bindings(cursor: pyodbc.Cursor):\n \"\"\"More than one bind and select on a cursor\"\"\"\n cursor.execute('create table t1(n int)')\n cursor.execute('insert into t1 values (?)', 1)\n cursor.execute('insert into t1 values (?)', 2)\n cursor.execute('insert into t1 values (?)', 3)\n for _ in range(3):\n cursor.execute('select n from t1 where n < ?', 10)\n cursor.execute('select n from t1 where n < 3')\n\n\n<mask token>\n\n\ndef _test_vartype(cursor: pyodbc.Cursor, datatype):\n if datatype == 'text':\n lengths = LARGE_FENCEPOST_SIZES\n else:\n lengths = SMALL_FENCEPOST_SIZES\n if datatype == 'text':\n cursor.execute(f'create table t1(c1 {datatype})')\n else:\n maxlen = lengths[-1]\n cursor.execute(f'create table t1(c1 {datatype}({maxlen}))')\n for length in lengths:\n cursor.execute('delete from t1')\n encoding = datatype in ('blob', 'varbinary') and 'utf8' or None\n value 
= _generate_str(length, encoding=encoding)\n try:\n cursor.execute('insert into t1 values(?)', value)\n except pyodbc.Error as ex:\n msg = f'{datatype} insert failed: length={length} len={len(value)}'\n raise Exception(msg) from ex\n v = cursor.execute('select * from t1').fetchone()[0]\n assert v == value\n\n\n<mask token>\n\n\ndef test_noscan(cursor: pyodbc.Cursor):\n assert cursor.noscan is False\n cursor.noscan = True\n assert cursor.noscan is True\n\n\n<mask token>\n\n\ndef test_native_uuid(cursor: pyodbc.Cursor):\n value = uuid.uuid4()\n cursor.execute('create table t1(n uniqueidentifier)')\n cursor.execute('insert into t1 values (?)', value)\n pyodbc.native_uuid = True\n result = cursor.execute('select n from t1').fetchval()\n assert isinstance(result, uuid.UUID)\n assert value == result\n\n\n<mask token>\n\n\[email protected](IS_FREEDTS, reason=\n 'https://github.com/FreeTDS/freetds/issues/230')\ndef test_nextset_with_raiserror(cursor: pyodbc.Cursor):\n cursor.execute(\"select i = 1; RAISERROR('c', 16, 1);\")\n row = next(cursor)\n assert 1 == row.i\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.nextset()\n\n\n<mask token>\n\n\ndef test_bit(cursor: pyodbc.Cursor):\n value = True\n cursor.execute('create table t1(b bit)')\n cursor.execute('insert into t1 values (?)', value)\n v = cursor.execute('select b from t1').fetchone()[0]\n assert isinstance(v, bool)\n assert v == value\n\n\ndef test_decimal(cursor: pyodbc.Cursor):\n for precision, scale, negative in [(1, 0, False), (1, 0, True), (6, 0, \n False), (6, 2, False), (6, 4, True), (6, 6, True), (38, 0, False),\n (38, 10, False), (38, 38, False), (38, 0, True), (38, 10, True), (\n 38, 38, True)]:\n try:\n cursor.execute('drop table t1')\n except:\n pass\n cursor.execute(f'create table t1(d decimal({precision}, {scale}))')\n sign = negative and '-' or ''\n before = '9' * (precision - scale)\n after = scale and '.' 
+ '9' * scale or ''\n decStr = f'{sign}{before}{after}'\n value = Decimal(decStr)\n cursor.execute('insert into t1 values(?)', value)\n v = cursor.execute('select d from t1').fetchone()[0]\n assert v == value\n\n\ndef test_decimal_e(cursor: pyodbc.Cursor):\n \"\"\"Ensure exponential notation decimals are properly handled\"\"\"\n value = Decimal((0, (1, 2, 3), 5))\n cursor.execute('create table t1(d decimal(10, 2))')\n cursor.execute('insert into t1 values (?)', value)\n result = cursor.execute('select * from t1').fetchone()[0]\n assert result == value\n\n\n<mask token>\n\n\ndef test_close_cnxn():\n \"\"\"Make sure using a Cursor after closing its connection doesn't crash.\"\"\"\n cnxn = connect()\n cursor = cnxn.cursor()\n cursor.execute('drop table if exists t1')\n cursor.execute('create table t1(id integer, s varchar(20))')\n cursor.execute('insert into t1 values (?,?)', 1, 'test')\n cursor.execute('select * from t1')\n cnxn.close()\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.execute('select * from t1')\n\n\ndef test_empty_string(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(s varchar(20))')\n cursor.execute('insert into t1 values(?)', '')\n\n\ndef test_empty_string_encoding():\n cnxn = connect()\n cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='shift_jis')\n value = ''\n cursor = cnxn.cursor()\n cursor.execute('create table t1(s varchar(20))')\n cursor.execute('insert into t1 values(?)', value)\n v = cursor.execute('select * from t1').fetchone()[0]\n assert v == value\n\n\n<mask token>\n\n\ndef test_negative_row_index(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(s varchar(20))')\n cursor.execute('insert into t1 values(?)', '1')\n row = cursor.execute('select * from t1').fetchone()\n assert row[0] == '1'\n assert row[-1] == '1'\n\n\ndef test_version():\n assert 3 == len(pyodbc.version.split('.'))\n\n\[email protected](IS_MSODBCSQL and SQLSERVER_YEAR < 2008, reason=\n 'Date not supported until 2008?')\ndef test_date(cursor: pyodbc.Cursor):\n value = date.today()\n cursor.execute('create table t1(d date)')\n cursor.execute('insert into t1 values (?)', value)\n result = cursor.execute('select d from t1').fetchone()[0]\n assert isinstance(result, date)\n assert value == result\n\n\n<mask token>\n\n\ndef test_datetime_fraction_rounded(cursor: pyodbc.Cursor):\n full = datetime(2007, 1, 15, 3, 4, 5, 123456)\n rounded = datetime(2007, 1, 15, 3, 4, 5, 123000)\n cursor.execute('create table t1(dt datetime)')\n cursor.execute('insert into t1 values (?)', full)\n result = cursor.execute('select dt from t1').fetchone()[0]\n assert isinstance(result, datetime)\n assert rounded == result\n\n\ndef test_datetime2(cursor: pyodbc.Cursor):\n value = datetime(2007, 1, 15, 3, 4, 5)\n cursor.execute('create table t1(dt datetime2)')\n cursor.execute('insert into t1 values (?)', value)\n result = cursor.execute('select dt from t1').fetchone()[0]\n assert isinstance(result, datetime)\n assert value == result\n\n\ndef test_sp_results(cursor: pyodbc.Cursor):\n cursor.execute(\n \"\"\"\n Create procedure proc1\n AS\n select top 10 name, id, xtype, refdate\n from sysobjects\n \"\"\"\n )\n rows = cursor.execute('exec proc1').fetchall()\n assert isinstance(rows, list)\n assert len(rows) == 10\n assert isinstance(rows[0].refdate, datetime)\n\n\ndef test_sp_results_from_temp(cursor: pyodbc.Cursor):\n cursor.execute(\n \"\"\"\n Create procedure proc1\n AS\n set nocount on\n select top 10 name, id, xtype, refdate\n into #tmptable\n from sysobjects\n\n select * from #tmptable\n \"\"\"\n )\n 
cursor.execute('exec proc1')\n assert cursor.description is not None\n assert len(cursor.description) == 4\n rows = cursor.fetchall()\n assert isinstance(rows, list)\n assert len(rows) == 10\n assert isinstance(rows[0].refdate, datetime)\n\n\n<mask token>\n\n\ndef test_sp_with_dates(cursor: pyodbc.Cursor):\n cursor.execute(\n \"\"\"\n if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]')\n and OBJECTPROPERTY(id, N'IsProcedure') = 1)\n drop procedure [dbo].[test_sp]\n \"\"\"\n )\n cursor.execute(\n \"\"\"\n create procedure test_sp(@d1 datetime, @d2 datetime)\n AS\n declare @d as int\n set @d = datediff(year, @d1, @d2)\n select @d\n \"\"\"\n )\n cursor.execute('exec test_sp ?, ?', datetime.now(), datetime.now())\n rows = cursor.fetchall()\n assert rows is not None\n assert rows[0][0] == 0\n\n\n<mask token>\n\n\ndef test_rowcount_delete(cursor: pyodbc.Cursor):\n assert cursor.rowcount == -1\n cursor.execute('create table t1(i int)')\n count = 4\n for i in range(count):\n cursor.execute('insert into t1 values (?)', i)\n cursor.execute('delete from t1')\n assert cursor.rowcount == count\n\n\n<mask token>\n\n\ndef test_rowcount_select(cursor: pyodbc.Cursor):\n \"\"\"\n Ensure Cursor.rowcount is set properly after a select statement.\n\n pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount, but SQL Server 2005\n returns -1 after a select statement, so we'll test for that behavior. This is valid\n behavior according to the DB API specification, but people don't seem to like it.\n \"\"\"\n cursor.execute('create table t1(i int)')\n count = 4\n for i in range(count):\n cursor.execute('insert into t1 values (?)', i)\n cursor.execute('select * from t1')\n assert cursor.rowcount == -1\n rows = cursor.fetchall()\n assert len(rows) == count\n assert cursor.rowcount == -1\n\n\n<mask token>\n\n\ndef test_retcursor_delete(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(i int)')\n cursor.execute('insert into t1 values (1)')\n v = cursor.execute('delete from t1')\n assert v == cursor\n\n\ndef test_retcursor_nodata(cursor: pyodbc.Cursor):\n \"\"\"\n This represents a different code path than a delete that deleted something.\n\n The return value is SQL_NO_DATA and code after it was causing an error. 
We could use\n SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount\n code.\n \"\"\"\n cursor.execute('create table t1(i int)')\n v = cursor.execute('delete from t1')\n assert v == cursor\n\n\n<mask token>\n\n\ndef table_with_spaces(cursor: pyodbc.Cursor):\n \"\"\"Ensure we can select using [x z] syntax\"\"\"\n try:\n cursor.execute('create table [test one](int n)')\n cursor.execute('insert into [test one] values(1)')\n cursor.execute('select * from [test one]')\n v = cursor.fetchone()[0]\n assert v == 1\n finally:\n cursor.rollback()\n\n\n<mask token>\n\n\ndef test_row_description(cursor: pyodbc.Cursor):\n \"\"\"\n Ensure Cursor.description is accessible as Row.cursor_description.\n \"\"\"\n cursor.execute('create table t1(a int, b char(3))')\n cursor.execute(\"insert into t1 values(1, 'abc')\")\n row = cursor.execute('select * from t1').fetchone()\n assert cursor.description == row.cursor_description\n\n\ndef test_temp_select(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(s char(7))')\n cursor.execute('insert into t1 values(?)', 'testing')\n v = cursor.execute('select * from t1').fetchone()[0]\n assert isinstance(v, str)\n assert v == 'testing'\n cursor.execute('select s into t2 from t1')\n v = cursor.execute('select * from t1').fetchone()[0]\n assert isinstance(v, str)\n assert v == 'testing'\n\n\ndef test_executemany(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(a int, b varchar(10))')\n params = [(i, str(i)) for i in range(1, 6)]\n cursor.executemany('insert into t1(a, b) values (?,?)', params)\n count = cursor.execute('select count(*) from t1').fetchone()[0]\n assert count == len(params)\n cursor.execute('select a, b from t1 order by a')\n rows = cursor.fetchall()\n assert count == len(rows)\n for param, row in zip(params, rows):\n assert param[0] == row[0]\n assert param[1] == row[1]\n\n\n<mask token>\n\n\ndef test_executemany_dae_0(cursor: pyodbc.Cursor):\n \"\"\"\n DAE for 0-length value\n \"\"\"\n cursor.execute('create table t1(a nvarchar(max))')\n cursor.fast_executemany = True\n cursor.executemany('insert into t1(a) values(?)', [['']])\n assert cursor.execute('select a from t1').fetchone()[0] == ''\n cursor.fast_executemany = False\n\n\n<mask token>\n\n\ndef test_row_slicing(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(a int, b int, c int, d int)')\n cursor.execute('insert into t1 values(1,2,3,4)')\n row = cursor.execute('select * from t1').fetchone()\n result = row[:]\n assert result is row\n result = row[:-1]\n assert result == (1, 2, 3)\n result = row[0:4]\n assert result is row\n\n\ndef test_row_repr(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(a int, b int, c int, d varchar(50))')\n cursor.execute(\"insert into t1 values(1,2,3,'four')\")\n row = cursor.execute('select * from t1').fetchone()\n result = str(row)\n assert result == \"(1, 2, 3, 'four')\"\n result = str(row[:-1])\n assert result == '(1, 2, 3)'\n result = str(row[:1])\n assert result == '(1,)'\n\n\ndef test_concatenation(cursor: pyodbc.Cursor):\n v2 = '0123456789' * 30\n v3 = '9876543210' * 30\n cursor.execute(\n 'create table t1(c1 int identity(1, 1), c2 varchar(300), c3 varchar(300))'\n )\n cursor.execute('insert into t1(c2, c3) values (?,?)', v2, v3)\n row = cursor.execute('select c2, c3, c2 + c3 as both from t1').fetchone()\n assert row.both == v2 + v3\n\n\n<mask token>\n\n\ndef test_autocommit():\n cnxn = connect()\n assert cnxn.autocommit is False\n cnxn = None\n cnxn = connect(autocommit=True)\n assert cnxn.autocommit is 
True\n cnxn.autocommit = False\n assert cnxn.autocommit is False\n\n\ndef test_sqlserver_callproc(cursor: pyodbc.Cursor):\n try:\n cursor.execute('drop procedure pyodbctest')\n cursor.commit()\n except:\n pass\n cursor.execute('create table t1(s varchar(10))')\n cursor.execute('insert into t1 values(?)', 'testing')\n cursor.execute(\n \"\"\"\n create procedure pyodbctest @var1 varchar(32)\n as\n begin\n select s from t1\n return\n end\n \"\"\"\n )\n cursor.execute(\"exec pyodbctest 'hi'\")\n\n\ndef test_skip(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(id int)')\n for i in range(1, 5):\n cursor.execute('insert into t1 values(?)', i)\n cursor.execute('select id from t1 order by id')\n assert cursor.fetchone()[0] == 1\n cursor.skip(2)\n assert cursor.fetchone()[0] == 4\n\n\ndef test_timeout():\n cnxn = connect()\n assert cnxn.timeout == 0\n cnxn.timeout = 30\n assert cnxn.timeout == 30\n cnxn.timeout = 0\n assert cnxn.timeout == 0\n\n\ndef test_sets_execute(cursor: pyodbc.Cursor):\n cursor.execute('create table t1 (word varchar (100))')\n words = {'a', 'b', 'c'}\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.execute('insert into t1 (word) values (?)', words)\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.executemany('insert into t1 (word) values (?)', words)\n\n\ndef test_row_execute(cursor: pyodbc.Cursor):\n \"\"\"Ensure we can use a Row object as a parameter to execute\"\"\"\n cursor.execute('create table t1(n int, s varchar(10))')\n cursor.execute(\"insert into t1 values (1, 'a')\")\n row = cursor.execute('select n, s from t1').fetchone()\n assert row\n cursor.execute('create table t2(n int, s varchar(10))')\n cursor.execute('insert into t2 values (?, ?)', row)\n\n\ndef test_row_executemany(cursor: pyodbc.Cursor):\n \"\"\"Ensure we can use a Row object as a parameter to executemany\"\"\"\n cursor.execute('create table t1(n int, s varchar(10))')\n for i in range(3):\n cursor.execute('insert into t1 values (?, ?)', i, chr(ord('a') + i))\n rows = cursor.execute('select n, s from t1').fetchall()\n assert len(rows) != 0\n cursor.execute('create table t2(n int, s varchar(10))')\n cursor.executemany('insert into t2 values (?, ?)', rows)\n\n\ndef test_description(cursor: pyodbc.Cursor):\n \"\"\"Ensure cursor.description is correct\"\"\"\n cursor.execute('create table t1(n int, s varchar(8), d decimal(5,2))')\n cursor.execute(\"insert into t1 values (1, 'abc', '1.23')\")\n cursor.execute('select * from t1')\n t = cursor.description[0]\n assert t[0] == 'n'\n assert t[1] == int\n assert t[5] == 0\n assert t[6] is True\n t = cursor.description[1]\n assert t[0] == 's'\n assert t[1] == str\n assert t[4] == 8\n assert t[5] == 0\n assert t[6] is True\n t = cursor.description[2]\n assert t[0] == 'd'\n assert t[1] == Decimal\n assert t[4] == 5\n assert t[5] == 2\n assert t[6] is True\n\n\n<mask token>\n\n\ndef test_cursor_messages_with_stored_proc(cursor: pyodbc.Cursor):\n \"\"\"\n Complex scenario to test the Cursor.messages attribute.\n \"\"\"\n cursor.execute(\n \"\"\"\n create or alter procedure test_cursor_messages as\n begin\n set nocount on;\n print 'Message 1a';\n print 'Message 1b';\n select N'Field 1a' AS F UNION ALL SELECT N'Field 1b';\n select N'Field 2a' AS F UNION ALL SELECT N'Field 2b';\n print 'Message 2a';\n print 'Message 2b';\n end\n \"\"\"\n )\n cursor.execute('exec test_cursor_messages')\n vals = [row[0] for row in cursor.fetchall()]\n assert vals == ['Field 1a', 'Field 1b']\n msgs = [re.search('Message \\\\d[ab]$', m[1]).group(0) for m in cursor.\n 
messages]\n assert msgs == ['Message 1a', 'Message 1b']\n assert cursor.nextset()\n vals = [row[0] for row in cursor.fetchall()]\n assert vals == ['Field 2a', 'Field 2b']\n assert not cursor.messages\n assert cursor.nextset()\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.fetchall()\n msgs = [re.search('Message \\\\d[ab]$', m[1]).group(0) for m in cursor.\n messages]\n assert msgs == ['Message 2a', 'Message 2b']\n assert not cursor.nextset()\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.fetchall()\n assert not cursor.messages\n\n\ndef test_none_param(cursor: pyodbc.Cursor):\n \"\"\"Ensure None can be used for params other than the first\"\"\"\n cursor.execute('create table t1(n int, blob varbinary(max))')\n cursor.execute('insert into t1 values (1, newid())')\n row = cursor.execute('select * from t1').fetchone()\n assert row.n == 1\n assert isinstance(row.blob, bytes)\n sql = 'update t1 set n=?, blob=?'\n try:\n cursor.execute(sql, 2, None)\n except pyodbc.DataError:\n if IS_FREEDTS:\n cursor.setinputsizes([(), (pyodbc.SQL_VARBINARY, None, None)])\n cursor.execute(sql, 2, None)\n else:\n raise\n row = cursor.execute('select * from t1').fetchone()\n assert row.n == 2\n assert row.blob is None\n\n\ndef test_output_conversion():\n\n def convert1(value):\n return 'X' + value.decode('latin1') + 'X'\n\n def convert2(value):\n return 'Y' + value.decode('latin1') + 'Y'\n cnxn = connect()\n cursor = cnxn.cursor()\n cursor.execute('create table t1(n int, v varchar(10))')\n cursor.execute(\"insert into t1 values (1, '123.45')\")\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == 'X123.45X'\n cnxn.clear_output_converters()\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == '123.45'\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == 'X123.45X'\n cnxn.remove_output_converter(pyodbc.SQL_VARCHAR)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == '123.45'\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == 'X123.45X'\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, None)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == '123.45'\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == 'X123.45X'\n prev_converter = cnxn.get_output_converter(pyodbc.SQL_VARCHAR)\n assert prev_converter is not None\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert2)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == 'Y123.45Y'\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, prev_converter)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == 'X123.45X'\n cnxn.clear_output_converters()\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == '123.45'\n prev_converter = cnxn.get_output_converter(pyodbc.SQL_VARCHAR)\n assert prev_converter is None\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert2)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == 'Y123.45Y'\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, prev_converter)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == '123.45'\n\n\ndef test_too_large(cursor: pyodbc.Cursor):\n \"\"\"Ensure error raised if insert fails due to 
truncation\"\"\"\n value = 'x' * 1000\n cursor.execute('create table t1(s varchar(800))')\n with pytest.raises(pyodbc.Error):\n cursor.execute('insert into t1 values (?)', value)\n\n\n<mask token>\n\n\ndef test_context_manager_success():\n \"\"\"Ensure `with` commits if an exception is not raised\"\"\"\n cnxn = connect()\n cursor = cnxn.cursor()\n cursor.execute('create table t1(n int)')\n cnxn.commit()\n with cnxn:\n cursor.execute('insert into t1 values (1)')\n rows = cursor.execute('select n from t1').fetchall()\n assert len(rows) == 1\n assert rows[0][0] == 1\n\n\ndef test_context_manager_failure(cursor: pyodbc.Cursor):\n \"\"\"Ensure `with` rolls back if an exception is raised\"\"\"\n cnxn = connect()\n cursor = cnxn.cursor()\n cursor.execute('create table t1(n int)')\n cursor.execute('insert into t1 values (1)')\n cnxn.commit()\n with pytest.raises(pyodbc.Error):\n with cnxn:\n cursor.execute('insert into t1 values (2)')\n cursor.execute('delete from bogus')\n cursor.execute('select max(n) from t1')\n val = cursor.fetchval()\n assert val == 1\n\n\ndef test_untyped_none(cursor: pyodbc.Cursor):\n value = cursor.execute('select ?', None).fetchone()[0]\n assert value is None\n\n\ndef test_large_update_nodata(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(a varbinary(max))')\n hundredkb = b'x' * 100 * 1024\n cursor.execute('update t1 set a=? where 1=0', (hundredkb,))\n\n\ndef test_func_param(cursor: pyodbc.Cursor):\n try:\n cursor.execute('drop function func1')\n except:\n pass\n cursor.execute(\n \"\"\"\n create function func1 (@testparam varchar(4))\n returns @rettest table (param varchar(4))\n as\n begin\n insert @rettest\n select @testparam\n return\n end\n \"\"\"\n )\n cursor.commit()\n value = cursor.execute('select * from func1(?)', 'test').fetchone()[0]\n assert value == 'test'\n\n\ndef test_columns(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(a int, b varchar(3), xΏz varchar(4))')\n cursor.columns('t1')\n results = {row.column_name: row for row in cursor}\n row = results['a']\n assert row.type_name == 'int', row.type_name\n row = results['b']\n assert row.type_name == 'varchar'\n assert row.column_size == 3\n cursor.columns('t1', schema=None, catalog=None)\n results = {row.column_name: row for row in cursor}\n row = results['a']\n assert row.type_name == 'int', row.type_name\n row = results['b']\n assert row.type_name == 'varchar'\n assert row.column_size == 3\n row = results['xΏz']\n assert row.type_name == 'varchar'\n assert row.column_size == 4, row.column_size\n for i in range(8, 16):\n table_name = 'pyodbc_89abcdef'[:i]\n cursor.execute(\n f\"\"\"\n IF OBJECT_ID (N'{table_name}', N'U') IS NOT NULL DROP TABLE {table_name};\n CREATE TABLE {table_name} (id INT PRIMARY KEY);\n \"\"\"\n )\n col_count = len([col.column_name for col in cursor.columns(table_name)]\n )\n assert col_count == 1\n cursor.execute(f'drop table {table_name}')\n\n\n<mask token>\n\n\ndef test_emoticons_as_parameter(cursor: pyodbc.Cursor):\n v = 'x 🌜 z'\n cursor.execute('create table t1(s nvarchar(100))')\n cursor.execute('insert into t1 values (?)', v)\n result = cursor.execute('select s from t1').fetchone()[0]\n assert result == v\n\n\ndef test_emoticons_as_literal(cursor: pyodbc.Cursor):\n v = 'x 🌜 z'\n cursor.execute('create table t1(s nvarchar(100))')\n cursor.execute(f\"insert into t1 values (N'{v}')\")\n result = cursor.execute('select s from t1').fetchone()[0]\n assert result == v\n\n\n<mask token>\n\n\[email protected](IS_FREEDTS, reason='FreeTDS does not support TVP')\ndef 
test_tvp_diffschema(cursor: pyodbc.Cursor):\n _test_tvp(cursor, True)\n\n\ndef get_sqlserver_version(cursor: pyodbc.Cursor):\n \"\"\"\n Returns the major version: 8-->2000, 9-->2005, 10-->2008\n \"\"\"\n cursor.execute(\"exec master..xp_msver 'ProductVersion'\")\n row = cursor.fetchone()\n return int(row.Character_Value.split('.', 1)[0])\n\n\n@lru_cache()\ndef _generate_str(length, encoding=None):\n \"\"\"\n Returns either a string or bytes, depending on whether encoding is provided,\n that is `length` elements long.\n\n If length is None, None is returned. This simplifies the tests by letting us put None into\n an array of other lengths and pass them here, moving the special case check into one place.\n \"\"\"\n if length is None:\n return None\n v = 'á'\n remaining = max(0, length - len(v))\n if remaining:\n seed = '0123456789-abcdefghijklmnopqrstuvwxyz-'\n if remaining <= len(seed):\n v += seed\n else:\n c = remaining + len(seed) - 1 // len(seed)\n v += seed * c\n if encoding:\n v = v.encode(encoding)\n v = v[:length]\n return v\n",
"step-4": "<mask token>\n\n\ndef connect(autocommit=False, attrs_before=None):\n return pyodbc.connect(CNXNSTR, autocommit=autocommit, attrs_before=\n attrs_before)\n\n\n<mask token>\n\n\[email protected]()\ndef cursor() ->Iterator[pyodbc.Cursor]:\n cnxn = connect()\n cur = cnxn.cursor()\n cur.execute('drop table if exists t1')\n cur.execute('drop table if exists t2')\n cur.execute('drop table if exists t3')\n cnxn.commit()\n yield cur\n if not cnxn.closed:\n cur.close()\n cnxn.close()\n\n\ndef test_text(cursor: pyodbc.Cursor):\n _test_vartype(cursor, 'text')\n\n\ndef test_varchar(cursor: pyodbc.Cursor):\n _test_vartype(cursor, 'varchar')\n\n\ndef test_nvarchar(cursor: pyodbc.Cursor):\n _test_vartype(cursor, 'nvarchar')\n\n\ndef test_varbinary(cursor: pyodbc.Cursor):\n _test_vartype(cursor, 'varbinary')\n\n\[email protected](SQLSERVER_YEAR < 2005, reason=\n '(max) not supported until 2005')\ndef test_unicode_longmax(cursor: pyodbc.Cursor):\n cursor.execute(\"select cast(replicate(N'x', 512) as nvarchar(max))\")\n\n\ndef test_char(cursor: pyodbc.Cursor):\n value = 'testing'\n cursor.execute('create table t1(s char(7))')\n cursor.execute('insert into t1 values(?)', 'testing')\n v = cursor.execute('select * from t1').fetchone()[0]\n assert v == value\n\n\ndef test_int(cursor: pyodbc.Cursor):\n _test_scalar(cursor, 'int', [None, -1, 0, 1, 12345678])\n\n\ndef test_bigint(cursor: pyodbc.Cursor):\n _test_scalar(cursor, 'bigint', [None, -1, 0, 1, 4886718345, 2147483647,\n 4294967295, 4886718345])\n\n\ndef test_overflow_int(cursor: pyodbc.Cursor):\n input = 9999999999999999999999999999999999999\n cursor.execute('create table t1(d bigint)')\n with pytest.raises(OverflowError):\n cursor.execute('insert into t1 values (?)', input)\n result = cursor.execute('select * from t1').fetchall()\n assert result == []\n\n\ndef test_float(cursor: pyodbc.Cursor):\n _test_scalar(cursor, 'float', [None, -200, -1, 0, 1, 1234.5, -200, \n 0.00012345])\n\n\ndef test_non_numeric_float(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(d float)')\n for input in (float('+Infinity'), float('-Infinity'), float('NaN')):\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.execute('insert into t1 values (?)', input)\n\n\ndef test_drivers():\n p = pyodbc.drivers()\n assert isinstance(p, list)\n\n\ndef test_datasources():\n p = pyodbc.dataSources()\n assert isinstance(p, dict)\n\n\ndef test_getinfo_string():\n cnxn = connect()\n value = cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR)\n assert isinstance(value, str)\n\n\ndef test_getinfo_bool():\n cnxn = connect()\n value = cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES)\n assert isinstance(value, bool)\n\n\ndef test_getinfo_int():\n cnxn = connect()\n value = cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION)\n assert isinstance(value, int)\n\n\ndef test_getinfo_smallint():\n cnxn = connect()\n value = cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR)\n assert isinstance(value, int)\n\n\ndef test_no_fetch(cursor: pyodbc.Cursor):\n cursor.execute('select 1')\n cursor.execute('select 1')\n cursor.execute('select 1')\n\n\ndef test_decode_meta(cursor: pyodbc.Cursor):\n \"\"\"\n Ensure column names with non-ASCII characters are converted using the configured encodings.\n \"\"\"\n cursor.execute('create table t1(a int)')\n cursor.execute('insert into t1 values (1)')\n cursor.execute('select a as \"Tipología\" from t1')\n assert cursor.description[0][0] == 'Tipología'\n\n\ndef test_exc_integrity(cursor: pyodbc.Cursor):\n \"\"\"Make sure an IntegretyError is raised\"\"\"\n 
cursor.execute('create table t1(s1 varchar(10) primary key)')\n cursor.execute(\"insert into t1 values ('one')\")\n with pytest.raises(pyodbc.IntegrityError):\n cursor.execute(\"insert into t1 values ('one')\")\n\n\ndef test_multiple_bindings(cursor: pyodbc.Cursor):\n \"\"\"More than one bind and select on a cursor\"\"\"\n cursor.execute('create table t1(n int)')\n cursor.execute('insert into t1 values (?)', 1)\n cursor.execute('insert into t1 values (?)', 2)\n cursor.execute('insert into t1 values (?)', 3)\n for _ in range(3):\n cursor.execute('select n from t1 where n < ?', 10)\n cursor.execute('select n from t1 where n < 3')\n\n\ndef test_different_bindings(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(n int)')\n cursor.execute('create table t2(d datetime)')\n cursor.execute('insert into t1 values (?)', 1)\n cursor.execute('insert into t2 values (?)', datetime.now())\n\n\n<mask token>\n\n\ndef _test_vartype(cursor: pyodbc.Cursor, datatype):\n if datatype == 'text':\n lengths = LARGE_FENCEPOST_SIZES\n else:\n lengths = SMALL_FENCEPOST_SIZES\n if datatype == 'text':\n cursor.execute(f'create table t1(c1 {datatype})')\n else:\n maxlen = lengths[-1]\n cursor.execute(f'create table t1(c1 {datatype}({maxlen}))')\n for length in lengths:\n cursor.execute('delete from t1')\n encoding = datatype in ('blob', 'varbinary') and 'utf8' or None\n value = _generate_str(length, encoding=encoding)\n try:\n cursor.execute('insert into t1 values(?)', value)\n except pyodbc.Error as ex:\n msg = f'{datatype} insert failed: length={length} len={len(value)}'\n raise Exception(msg) from ex\n v = cursor.execute('select * from t1').fetchone()[0]\n assert v == value\n\n\ndef _test_scalar(cursor: pyodbc.Cursor, datatype, values):\n \"\"\"\n A simple test wrapper for types that are identical when written and read.\n \"\"\"\n cursor.execute(f'create table t1(c1 {datatype})')\n for value in values:\n cursor.execute('delete from t1')\n cursor.execute('insert into t1 values (?)', value)\n v = cursor.execute('select c1 from t1').fetchone()[0]\n assert v == value\n\n\ndef test_noscan(cursor: pyodbc.Cursor):\n assert cursor.noscan is False\n cursor.noscan = True\n assert cursor.noscan is True\n\n\ndef test_nonnative_uuid(cursor: pyodbc.Cursor):\n value = uuid.uuid4()\n cursor.execute('create table t1(n uniqueidentifier)')\n cursor.execute('insert into t1 values (?)', value)\n pyodbc.native_uuid = False\n result = cursor.execute('select n from t1').fetchval()\n assert isinstance(result, str)\n assert result == str(value).upper()\n pyodbc.native_uuid = True\n\n\ndef test_native_uuid(cursor: pyodbc.Cursor):\n value = uuid.uuid4()\n cursor.execute('create table t1(n uniqueidentifier)')\n cursor.execute('insert into t1 values (?)', value)\n pyodbc.native_uuid = True\n result = cursor.execute('select n from t1').fetchval()\n assert isinstance(result, uuid.UUID)\n assert value == result\n\n\n<mask token>\n\n\[email protected](IS_FREEDTS, reason=\n 'https://github.com/FreeTDS/freetds/issues/230')\ndef test_nextset_with_raiserror(cursor: pyodbc.Cursor):\n cursor.execute(\"select i = 1; RAISERROR('c', 16, 1);\")\n row = next(cursor)\n assert 1 == row.i\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.nextset()\n\n\ndef test_fixed_unicode(cursor: pyodbc.Cursor):\n value = 'tësting'\n cursor.execute('create table t1(s nchar(7))')\n cursor.execute('insert into t1 values(?)', 'tësting')\n v = cursor.execute('select * from t1').fetchone()[0]\n assert isinstance(v, str)\n assert len(v) == len(value)\n assert v == 
value\n\n\ndef test_chinese(cursor: pyodbc.Cursor):\n v = '我的'\n cursor.execute(\"SELECT N'我的' AS [Name]\")\n row = cursor.fetchone()\n assert row[0] == v\n cursor.execute(\"SELECT N'我的' AS [Name]\")\n rows = cursor.fetchall()\n assert rows[0][0] == v\n\n\ndef test_bit(cursor: pyodbc.Cursor):\n value = True\n cursor.execute('create table t1(b bit)')\n cursor.execute('insert into t1 values (?)', value)\n v = cursor.execute('select b from t1').fetchone()[0]\n assert isinstance(v, bool)\n assert v == value\n\n\ndef test_decimal(cursor: pyodbc.Cursor):\n for precision, scale, negative in [(1, 0, False), (1, 0, True), (6, 0, \n False), (6, 2, False), (6, 4, True), (6, 6, True), (38, 0, False),\n (38, 10, False), (38, 38, False), (38, 0, True), (38, 10, True), (\n 38, 38, True)]:\n try:\n cursor.execute('drop table t1')\n except:\n pass\n cursor.execute(f'create table t1(d decimal({precision}, {scale}))')\n sign = negative and '-' or ''\n before = '9' * (precision - scale)\n after = scale and '.' + '9' * scale or ''\n decStr = f'{sign}{before}{after}'\n value = Decimal(decStr)\n cursor.execute('insert into t1 values(?)', value)\n v = cursor.execute('select d from t1').fetchone()[0]\n assert v == value\n\n\ndef test_decimal_e(cursor: pyodbc.Cursor):\n \"\"\"Ensure exponential notation decimals are properly handled\"\"\"\n value = Decimal((0, (1, 2, 3), 5))\n cursor.execute('create table t1(d decimal(10, 2))')\n cursor.execute('insert into t1 values (?)', value)\n result = cursor.execute('select * from t1').fetchone()[0]\n assert result == value\n\n\n<mask token>\n\n\ndef test_close_cnxn():\n \"\"\"Make sure using a Cursor after closing its connection doesn't crash.\"\"\"\n cnxn = connect()\n cursor = cnxn.cursor()\n cursor.execute('drop table if exists t1')\n cursor.execute('create table t1(id integer, s varchar(20))')\n cursor.execute('insert into t1 values (?,?)', 1, 'test')\n cursor.execute('select * from t1')\n cnxn.close()\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.execute('select * from t1')\n\n\ndef test_empty_string(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(s varchar(20))')\n cursor.execute('insert into t1 values(?)', '')\n\n\ndef test_empty_string_encoding():\n cnxn = connect()\n cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='shift_jis')\n value = ''\n cursor = cnxn.cursor()\n cursor.execute('create table t1(s varchar(20))')\n cursor.execute('insert into t1 values(?)', value)\n v = cursor.execute('select * from t1').fetchone()[0]\n assert v == value\n\n\ndef test_fixed_str(cursor: pyodbc.Cursor):\n value = 'testing'\n cursor.execute('create table t1(s char(7))')\n cursor.execute('insert into t1 values(?)', value)\n v = cursor.execute('select * from t1').fetchone()[0]\n assert isinstance(v, str)\n assert len(v) == len(value)\n assert v == value\n\n\n<mask token>\n\n\ndef test_empty_unicode_encoding():\n cnxn = connect()\n cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='shift_jis')\n value = ''\n cursor = cnxn.cursor()\n cursor.execute('create table t1(s nvarchar(20))')\n cursor.execute('insert into t1 values(?)', value)\n v = cursor.execute('select * from t1').fetchone()[0]\n assert v == value\n\n\ndef test_negative_row_index(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(s varchar(20))')\n cursor.execute('insert into t1 values(?)', '1')\n row = cursor.execute('select * from t1').fetchone()\n assert row[0] == '1'\n assert row[-1] == '1'\n\n\ndef test_version():\n assert 3 == len(pyodbc.version.split('.'))\n\n\[email protected](IS_MSODBCSQL and 
SQLSERVER_YEAR < 2008, reason=\n 'Date not supported until 2008?')\ndef test_date(cursor: pyodbc.Cursor):\n value = date.today()\n cursor.execute('create table t1(d date)')\n cursor.execute('insert into t1 values (?)', value)\n result = cursor.execute('select d from t1').fetchone()[0]\n assert isinstance(result, date)\n assert value == result\n\n\[email protected](IS_MSODBCSQL and SQLSERVER_YEAR < 2008, reason=\n 'Time not supported until 2008?')\ndef test_time(cursor: pyodbc.Cursor):\n value = datetime.now().time()\n value = value.replace(microsecond=0)\n cursor.execute('create table t1(t time)')\n cursor.execute('insert into t1 values (?)', value)\n result = cursor.execute('select t from t1').fetchone()[0]\n assert isinstance(result, time)\n assert value == result\n\n\ndef test_datetime(cursor: pyodbc.Cursor):\n value = datetime(2007, 1, 15, 3, 4, 5)\n cursor.execute('create table t1(dt datetime)')\n cursor.execute('insert into t1 values (?)', value)\n result = cursor.execute('select dt from t1').fetchone()[0]\n assert isinstance(result, datetime)\n assert value == result\n\n\ndef test_datetime_fraction(cursor: pyodbc.Cursor):\n value = datetime(2007, 1, 15, 3, 4, 5, 123000)\n cursor.execute('create table t1(dt datetime)')\n cursor.execute('insert into t1 values (?)', value)\n result = cursor.execute('select dt from t1').fetchone()[0]\n assert isinstance(result, datetime)\n assert value == result\n\n\ndef test_datetime_fraction_rounded(cursor: pyodbc.Cursor):\n full = datetime(2007, 1, 15, 3, 4, 5, 123456)\n rounded = datetime(2007, 1, 15, 3, 4, 5, 123000)\n cursor.execute('create table t1(dt datetime)')\n cursor.execute('insert into t1 values (?)', full)\n result = cursor.execute('select dt from t1').fetchone()[0]\n assert isinstance(result, datetime)\n assert rounded == result\n\n\ndef test_datetime2(cursor: pyodbc.Cursor):\n value = datetime(2007, 1, 15, 3, 4, 5)\n cursor.execute('create table t1(dt datetime2)')\n cursor.execute('insert into t1 values (?)', value)\n result = cursor.execute('select dt from t1').fetchone()[0]\n assert isinstance(result, datetime)\n assert value == result\n\n\ndef test_sp_results(cursor: pyodbc.Cursor):\n cursor.execute(\n \"\"\"\n Create procedure proc1\n AS\n select top 10 name, id, xtype, refdate\n from sysobjects\n \"\"\"\n )\n rows = cursor.execute('exec proc1').fetchall()\n assert isinstance(rows, list)\n assert len(rows) == 10\n assert isinstance(rows[0].refdate, datetime)\n\n\ndef test_sp_results_from_temp(cursor: pyodbc.Cursor):\n cursor.execute(\n \"\"\"\n Create procedure proc1\n AS\n set nocount on\n select top 10 name, id, xtype, refdate\n into #tmptable\n from sysobjects\n\n select * from #tmptable\n \"\"\"\n )\n cursor.execute('exec proc1')\n assert cursor.description is not None\n assert len(cursor.description) == 4\n rows = cursor.fetchall()\n assert isinstance(rows, list)\n assert len(rows) == 10\n assert isinstance(rows[0].refdate, datetime)\n\n\ndef test_sp_results_from_vartbl(cursor: pyodbc.Cursor):\n cursor.execute(\n \"\"\"\n Create procedure proc1\n AS\n set nocount on\n declare @tmptbl table(name varchar(100), id int, xtype varchar(4), refdate datetime)\n\n insert into @tmptbl\n select top 10 name, id, xtype, refdate\n from sysobjects\n\n select * from @tmptbl\n \"\"\"\n )\n cursor.execute('exec proc1')\n rows = cursor.fetchall()\n assert isinstance(rows, list)\n assert len(rows) == 10\n assert isinstance(rows[0].refdate, datetime)\n\n\ndef test_sp_with_dates(cursor: pyodbc.Cursor):\n cursor.execute(\n \"\"\"\n if exists (select 
* from dbo.sysobjects where id = object_id(N'[test_sp]')\n and OBJECTPROPERTY(id, N'IsProcedure') = 1)\n drop procedure [dbo].[test_sp]\n \"\"\"\n )\n cursor.execute(\n \"\"\"\n create procedure test_sp(@d1 datetime, @d2 datetime)\n AS\n declare @d as int\n set @d = datediff(year, @d1, @d2)\n select @d\n \"\"\"\n )\n cursor.execute('exec test_sp ?, ?', datetime.now(), datetime.now())\n rows = cursor.fetchall()\n assert rows is not None\n assert rows[0][0] == 0\n\n\ndef test_sp_with_none(cursor: pyodbc.Cursor):\n cursor.execute(\n \"\"\"\n if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]')\n and OBJECTPROPERTY(id, N'IsProcedure') = 1)\n drop procedure [dbo].[test_sp]\n \"\"\"\n )\n cursor.execute(\n \"\"\"\n create procedure test_sp(@x varchar(20))\n AS\n declare @y varchar(20)\n set @y = @x\n select @y\n \"\"\"\n )\n cursor.execute('exec test_sp ?', None)\n rows = cursor.fetchall()\n assert rows is not None\n assert rows[0][0] is None\n\n\ndef test_rowcount_delete(cursor: pyodbc.Cursor):\n assert cursor.rowcount == -1\n cursor.execute('create table t1(i int)')\n count = 4\n for i in range(count):\n cursor.execute('insert into t1 values (?)', i)\n cursor.execute('delete from t1')\n assert cursor.rowcount == count\n\n\n<mask token>\n\n\ndef test_rowcount_select(cursor: pyodbc.Cursor):\n \"\"\"\n Ensure Cursor.rowcount is set properly after a select statement.\n\n pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount, but SQL Server 2005\n returns -1 after a select statement, so we'll test for that behavior. This is valid\n behavior according to the DB API specification, but people don't seem to like it.\n \"\"\"\n cursor.execute('create table t1(i int)')\n count = 4\n for i in range(count):\n cursor.execute('insert into t1 values (?)', i)\n cursor.execute('select * from t1')\n assert cursor.rowcount == -1\n rows = cursor.fetchall()\n assert len(rows) == count\n assert cursor.rowcount == -1\n\n\n<mask token>\n\n\ndef test_retcursor_delete(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(i int)')\n cursor.execute('insert into t1 values (1)')\n v = cursor.execute('delete from t1')\n assert v == cursor\n\n\ndef test_retcursor_nodata(cursor: pyodbc.Cursor):\n \"\"\"\n This represents a different code path than a delete that deleted something.\n\n The return value is SQL_NO_DATA and code after it was causing an error. 
We could use\n SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount\n code.\n \"\"\"\n cursor.execute('create table t1(i int)')\n v = cursor.execute('delete from t1')\n assert v == cursor\n\n\ndef test_retcursor_select(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(i int)')\n cursor.execute('insert into t1 values (1)')\n v = cursor.execute('select * from t1')\n assert v == cursor\n\n\ndef table_with_spaces(cursor: pyodbc.Cursor):\n \"\"\"Ensure we can select using [x z] syntax\"\"\"\n try:\n cursor.execute('create table [test one](int n)')\n cursor.execute('insert into [test one] values(1)')\n cursor.execute('select * from [test one]')\n v = cursor.fetchone()[0]\n assert v == 1\n finally:\n cursor.rollback()\n\n\ndef test_lower_case():\n \"\"\"Ensure pyodbc.lowercase forces returned column names to lowercase.\"\"\"\n try:\n pyodbc.lowercase = True\n cnxn = connect()\n cursor = cnxn.cursor()\n cursor.execute('create table t1(Abc int, dEf int)')\n cursor.execute('select * from t1')\n names = [t[0] for t in cursor.description]\n names.sort()\n assert names == ['abc', 'def']\n finally:\n pyodbc.lowercase = False\n\n\ndef test_row_description(cursor: pyodbc.Cursor):\n \"\"\"\n Ensure Cursor.description is accessible as Row.cursor_description.\n \"\"\"\n cursor.execute('create table t1(a int, b char(3))')\n cursor.execute(\"insert into t1 values(1, 'abc')\")\n row = cursor.execute('select * from t1').fetchone()\n assert cursor.description == row.cursor_description\n\n\ndef test_temp_select(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(s char(7))')\n cursor.execute('insert into t1 values(?)', 'testing')\n v = cursor.execute('select * from t1').fetchone()[0]\n assert isinstance(v, str)\n assert v == 'testing'\n cursor.execute('select s into t2 from t1')\n v = cursor.execute('select * from t1').fetchone()[0]\n assert isinstance(v, str)\n assert v == 'testing'\n\n\ndef test_executemany(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(a int, b varchar(10))')\n params = [(i, str(i)) for i in range(1, 6)]\n cursor.executemany('insert into t1(a, b) values (?,?)', params)\n count = cursor.execute('select count(*) from t1').fetchone()[0]\n assert count == len(params)\n cursor.execute('select a, b from t1 order by a')\n rows = cursor.fetchall()\n assert count == len(rows)\n for param, row in zip(params, rows):\n assert param[0] == row[0]\n assert param[1] == row[1]\n\n\ndef test_executemany_one(cursor: pyodbc.Cursor):\n \"\"\"Pass executemany a single sequence\"\"\"\n cursor.execute('create table t1(a int, b varchar(10))')\n params = [(1, 'test')]\n cursor.executemany('insert into t1(a, b) values (?,?)', params)\n count = cursor.execute('select count(*) from t1').fetchone()[0]\n assert count == len(params)\n cursor.execute('select a, b from t1 order by a')\n rows = cursor.fetchall()\n assert count == len(rows)\n for param, row in zip(params, rows):\n assert param[0] == row[0]\n assert param[1] == row[1]\n\n\ndef test_executemany_dae_0(cursor: pyodbc.Cursor):\n \"\"\"\n DAE for 0-length value\n \"\"\"\n cursor.execute('create table t1(a nvarchar(max))')\n cursor.fast_executemany = True\n cursor.executemany('insert into t1(a) values(?)', [['']])\n assert cursor.execute('select a from t1').fetchone()[0] == ''\n cursor.fast_executemany = False\n\n\n<mask token>\n\n\ndef test_row_slicing(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(a int, b int, c int, d int)')\n cursor.execute('insert into t1 values(1,2,3,4)')\n row = 
cursor.execute('select * from t1').fetchone()\n result = row[:]\n assert result is row\n result = row[:-1]\n assert result == (1, 2, 3)\n result = row[0:4]\n assert result is row\n\n\ndef test_row_repr(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(a int, b int, c int, d varchar(50))')\n cursor.execute(\"insert into t1 values(1,2,3,'four')\")\n row = cursor.execute('select * from t1').fetchone()\n result = str(row)\n assert result == \"(1, 2, 3, 'four')\"\n result = str(row[:-1])\n assert result == '(1, 2, 3)'\n result = str(row[:1])\n assert result == '(1,)'\n\n\ndef test_concatenation(cursor: pyodbc.Cursor):\n v2 = '0123456789' * 30\n v3 = '9876543210' * 30\n cursor.execute(\n 'create table t1(c1 int identity(1, 1), c2 varchar(300), c3 varchar(300))'\n )\n cursor.execute('insert into t1(c2, c3) values (?,?)', v2, v3)\n row = cursor.execute('select c2, c3, c2 + c3 as both from t1').fetchone()\n assert row.both == v2 + v3\n\n\ndef test_view_select(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(c1 int identity(1, 1), c2 varchar(50))')\n for i in range(3):\n cursor.execute('insert into t1(c2) values (?)', f'string{i}')\n cursor.execute('create view t2 as select * from t1')\n cursor.execute('select * from t2')\n rows = cursor.fetchall()\n assert rows is not None\n assert len(rows) == 3\n\n\ndef test_autocommit():\n cnxn = connect()\n assert cnxn.autocommit is False\n cnxn = None\n cnxn = connect(autocommit=True)\n assert cnxn.autocommit is True\n cnxn.autocommit = False\n assert cnxn.autocommit is False\n\n\ndef test_sqlserver_callproc(cursor: pyodbc.Cursor):\n try:\n cursor.execute('drop procedure pyodbctest')\n cursor.commit()\n except:\n pass\n cursor.execute('create table t1(s varchar(10))')\n cursor.execute('insert into t1 values(?)', 'testing')\n cursor.execute(\n \"\"\"\n create procedure pyodbctest @var1 varchar(32)\n as\n begin\n select s from t1\n return\n end\n \"\"\"\n )\n cursor.execute(\"exec pyodbctest 'hi'\")\n\n\ndef test_skip(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(id int)')\n for i in range(1, 5):\n cursor.execute('insert into t1 values(?)', i)\n cursor.execute('select id from t1 order by id')\n assert cursor.fetchone()[0] == 1\n cursor.skip(2)\n assert cursor.fetchone()[0] == 4\n\n\ndef test_timeout():\n cnxn = connect()\n assert cnxn.timeout == 0\n cnxn.timeout = 30\n assert cnxn.timeout == 30\n cnxn.timeout = 0\n assert cnxn.timeout == 0\n\n\ndef test_sets_execute(cursor: pyodbc.Cursor):\n cursor.execute('create table t1 (word varchar (100))')\n words = {'a', 'b', 'c'}\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.execute('insert into t1 (word) values (?)', words)\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.executemany('insert into t1 (word) values (?)', words)\n\n\ndef test_row_execute(cursor: pyodbc.Cursor):\n \"\"\"Ensure we can use a Row object as a parameter to execute\"\"\"\n cursor.execute('create table t1(n int, s varchar(10))')\n cursor.execute(\"insert into t1 values (1, 'a')\")\n row = cursor.execute('select n, s from t1').fetchone()\n assert row\n cursor.execute('create table t2(n int, s varchar(10))')\n cursor.execute('insert into t2 values (?, ?)', row)\n\n\ndef test_row_executemany(cursor: pyodbc.Cursor):\n \"\"\"Ensure we can use a Row object as a parameter to executemany\"\"\"\n cursor.execute('create table t1(n int, s varchar(10))')\n for i in range(3):\n cursor.execute('insert into t1 values (?, ?)', i, chr(ord('a') + i))\n rows = cursor.execute('select n, s from t1').fetchall()\n assert 
len(rows) != 0\n cursor.execute('create table t2(n int, s varchar(10))')\n cursor.executemany('insert into t2 values (?, ?)', rows)\n\n\ndef test_description(cursor: pyodbc.Cursor):\n \"\"\"Ensure cursor.description is correct\"\"\"\n cursor.execute('create table t1(n int, s varchar(8), d decimal(5,2))')\n cursor.execute(\"insert into t1 values (1, 'abc', '1.23')\")\n cursor.execute('select * from t1')\n t = cursor.description[0]\n assert t[0] == 'n'\n assert t[1] == int\n assert t[5] == 0\n assert t[6] is True\n t = cursor.description[1]\n assert t[0] == 's'\n assert t[1] == str\n assert t[4] == 8\n assert t[5] == 0\n assert t[6] is True\n t = cursor.description[2]\n assert t[0] == 'd'\n assert t[1] == Decimal\n assert t[4] == 5\n assert t[5] == 2\n assert t[6] is True\n\n\ndef test_cursor_messages_with_print(cursor: pyodbc.Cursor):\n \"\"\"\n Ensure the Cursor.messages attribute is handled correctly with a simple PRINT statement.\n \"\"\"\n assert not cursor.messages\n for msg in ('hello world', 'ABCDEFGHIJ' * 800):\n cursor.execute(f\"PRINT '{msg}'\")\n messages = cursor.messages\n assert isinstance(messages, list)\n assert len(messages) == 1\n assert isinstance(messages[0], tuple)\n assert len(messages[0]) == 2\n assert isinstance(messages[0][0], str)\n assert isinstance(messages[0][1], str)\n assert '[01000] (0)' == messages[0][0]\n assert messages[0][1].endswith(msg)\n\n\ndef test_cursor_messages_with_stored_proc(cursor: pyodbc.Cursor):\n \"\"\"\n Complex scenario to test the Cursor.messages attribute.\n \"\"\"\n cursor.execute(\n \"\"\"\n create or alter procedure test_cursor_messages as\n begin\n set nocount on;\n print 'Message 1a';\n print 'Message 1b';\n select N'Field 1a' AS F UNION ALL SELECT N'Field 1b';\n select N'Field 2a' AS F UNION ALL SELECT N'Field 2b';\n print 'Message 2a';\n print 'Message 2b';\n end\n \"\"\"\n )\n cursor.execute('exec test_cursor_messages')\n vals = [row[0] for row in cursor.fetchall()]\n assert vals == ['Field 1a', 'Field 1b']\n msgs = [re.search('Message \\\\d[ab]$', m[1]).group(0) for m in cursor.\n messages]\n assert msgs == ['Message 1a', 'Message 1b']\n assert cursor.nextset()\n vals = [row[0] for row in cursor.fetchall()]\n assert vals == ['Field 2a', 'Field 2b']\n assert not cursor.messages\n assert cursor.nextset()\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.fetchall()\n msgs = [re.search('Message \\\\d[ab]$', m[1]).group(0) for m in cursor.\n messages]\n assert msgs == ['Message 2a', 'Message 2b']\n assert not cursor.nextset()\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.fetchall()\n assert not cursor.messages\n\n\ndef test_none_param(cursor: pyodbc.Cursor):\n \"\"\"Ensure None can be used for params other than the first\"\"\"\n cursor.execute('create table t1(n int, blob varbinary(max))')\n cursor.execute('insert into t1 values (1, newid())')\n row = cursor.execute('select * from t1').fetchone()\n assert row.n == 1\n assert isinstance(row.blob, bytes)\n sql = 'update t1 set n=?, blob=?'\n try:\n cursor.execute(sql, 2, None)\n except pyodbc.DataError:\n if IS_FREEDTS:\n cursor.setinputsizes([(), (pyodbc.SQL_VARBINARY, None, None)])\n cursor.execute(sql, 2, None)\n else:\n raise\n row = cursor.execute('select * from t1').fetchone()\n assert row.n == 2\n assert row.blob is None\n\n\ndef test_output_conversion():\n\n def convert1(value):\n return 'X' + value.decode('latin1') + 'X'\n\n def convert2(value):\n return 'Y' + value.decode('latin1') + 'Y'\n cnxn = connect()\n cursor = cnxn.cursor()\n cursor.execute('create 
table t1(n int, v varchar(10))')\n cursor.execute(\"insert into t1 values (1, '123.45')\")\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == 'X123.45X'\n cnxn.clear_output_converters()\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == '123.45'\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == 'X123.45X'\n cnxn.remove_output_converter(pyodbc.SQL_VARCHAR)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == '123.45'\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == 'X123.45X'\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, None)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == '123.45'\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == 'X123.45X'\n prev_converter = cnxn.get_output_converter(pyodbc.SQL_VARCHAR)\n assert prev_converter is not None\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert2)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == 'Y123.45Y'\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, prev_converter)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == 'X123.45X'\n cnxn.clear_output_converters()\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == '123.45'\n prev_converter = cnxn.get_output_converter(pyodbc.SQL_VARCHAR)\n assert prev_converter is None\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert2)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == 'Y123.45Y'\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, prev_converter)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == '123.45'\n\n\ndef test_too_large(cursor: pyodbc.Cursor):\n \"\"\"Ensure error raised if insert fails due to truncation\"\"\"\n value = 'x' * 1000\n cursor.execute('create table t1(s varchar(800))')\n with pytest.raises(pyodbc.Error):\n cursor.execute('insert into t1 values (?)', value)\n\n\ndef test_row_equal(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(n int, s varchar(20))')\n cursor.execute(\"insert into t1 values (1, 'test')\")\n row1 = cursor.execute('select n, s from t1').fetchone()\n row2 = cursor.execute('select n, s from t1').fetchone()\n assert row1 == row2\n\n\ndef test_row_gtlt(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(n int, s varchar(20))')\n cursor.execute(\"insert into t1 values (1, 'test1')\")\n cursor.execute(\"insert into t1 values (1, 'test2')\")\n rows = cursor.execute('select n, s from t1 order by s').fetchall()\n assert rows[0] < rows[1]\n assert rows[0] <= rows[1]\n assert rows[1] > rows[0]\n assert rows[1] >= rows[0]\n assert rows[0] != rows[1]\n rows = list(rows)\n rows.sort()\n\n\ndef test_context_manager_success():\n \"\"\"Ensure `with` commits if an exception is not raised\"\"\"\n cnxn = connect()\n cursor = cnxn.cursor()\n cursor.execute('create table t1(n int)')\n cnxn.commit()\n with cnxn:\n cursor.execute('insert into t1 values (1)')\n rows = cursor.execute('select n from t1').fetchall()\n assert len(rows) == 1\n assert rows[0][0] == 1\n\n\ndef test_context_manager_failure(cursor: pyodbc.Cursor):\n \"\"\"Ensure `with` rolls back if an exception is raised\"\"\"\n cnxn = connect()\n cursor = 
cnxn.cursor()\n cursor.execute('create table t1(n int)')\n cursor.execute('insert into t1 values (1)')\n cnxn.commit()\n with pytest.raises(pyodbc.Error):\n with cnxn:\n cursor.execute('insert into t1 values (2)')\n cursor.execute('delete from bogus')\n cursor.execute('select max(n) from t1')\n val = cursor.fetchval()\n assert val == 1\n\n\ndef test_untyped_none(cursor: pyodbc.Cursor):\n value = cursor.execute('select ?', None).fetchone()[0]\n assert value is None\n\n\ndef test_large_update_nodata(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(a varbinary(max))')\n hundredkb = b'x' * 100 * 1024\n cursor.execute('update t1 set a=? where 1=0', (hundredkb,))\n\n\ndef test_func_param(cursor: pyodbc.Cursor):\n try:\n cursor.execute('drop function func1')\n except:\n pass\n cursor.execute(\n \"\"\"\n create function func1 (@testparam varchar(4))\n returns @rettest table (param varchar(4))\n as\n begin\n insert @rettest\n select @testparam\n return\n end\n \"\"\"\n )\n cursor.commit()\n value = cursor.execute('select * from func1(?)', 'test').fetchone()[0]\n assert value == 'test'\n\n\ndef test_columns(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(a int, b varchar(3), xΏz varchar(4))')\n cursor.columns('t1')\n results = {row.column_name: row for row in cursor}\n row = results['a']\n assert row.type_name == 'int', row.type_name\n row = results['b']\n assert row.type_name == 'varchar'\n assert row.column_size == 3\n cursor.columns('t1', schema=None, catalog=None)\n results = {row.column_name: row for row in cursor}\n row = results['a']\n assert row.type_name == 'int', row.type_name\n row = results['b']\n assert row.type_name == 'varchar'\n assert row.column_size == 3\n row = results['xΏz']\n assert row.type_name == 'varchar'\n assert row.column_size == 4, row.column_size\n for i in range(8, 16):\n table_name = 'pyodbc_89abcdef'[:i]\n cursor.execute(\n f\"\"\"\n IF OBJECT_ID (N'{table_name}', N'U') IS NOT NULL DROP TABLE {table_name};\n CREATE TABLE {table_name} (id INT PRIMARY KEY);\n \"\"\"\n )\n col_count = len([col.column_name for col in cursor.columns(table_name)]\n )\n assert col_count == 1\n cursor.execute(f'drop table {table_name}')\n\n\n<mask token>\n\n\ndef test_emoticons_as_parameter(cursor: pyodbc.Cursor):\n v = 'x 🌜 z'\n cursor.execute('create table t1(s nvarchar(100))')\n cursor.execute('insert into t1 values (?)', v)\n result = cursor.execute('select s from t1').fetchone()[0]\n assert result == v\n\n\ndef test_emoticons_as_literal(cursor: pyodbc.Cursor):\n v = 'x 🌜 z'\n cursor.execute('create table t1(s nvarchar(100))')\n cursor.execute(f\"insert into t1 values (N'{v}')\")\n result = cursor.execute('select s from t1').fetchone()[0]\n assert result == v\n\n\ndef _test_tvp(cursor: pyodbc.Cursor, diff_schema):\n pyodbc.native_uuid = True\n procname = 'SelectTVP'\n typename = 'TestTVP'\n if diff_schema:\n schemaname = 'myschema'\n procname = schemaname + '.' + procname\n typenameonly = typename\n typename = schemaname + '.' 
+ typename\n try:\n cursor.execute('drop procedure ' + procname)\n except:\n pass\n try:\n cursor.execute('drop type ' + typename)\n except:\n pass\n if diff_schema:\n try:\n cursor.execute('drop schema ' + schemaname)\n except:\n pass\n cursor.commit()\n if diff_schema:\n cursor.execute('CREATE SCHEMA myschema')\n cursor.commit()\n cursor.execute(\n f\"\"\"\n CREATE TYPE {typename} AS TABLE(\n c01 VARCHAR(255),\n c02 VARCHAR(MAX),\n c03 VARBINARY(255),\n c04 VARBINARY(MAX),\n c05 BIT,\n c06 DATE,\n c07 TIME,\n c08 DATETIME2(5),\n c09 BIGINT,\n c10 FLOAT,\n c11 NUMERIC(38, 24),\n c12 UNIQUEIDENTIFIER)\n \"\"\"\n )\n cursor.commit()\n cursor.execute(\n f\"\"\"\n CREATE PROCEDURE {procname} @TVP {typename} READONLY\n AS SELECT * FROM @TVP;\n \"\"\"\n )\n cursor.commit()\n VERY_LONG_LEN = 2000000\n long_string = ''.join(chr(i) for i in range(32, 127))\n long_bytearray = bytes(list(range(255)))\n very_long_string = long_string * (VERY_LONG_LEN // len(long_string))\n very_long_bytearray = long_bytearray * (VERY_LONG_LEN // len(\n long_bytearray))\n params = [('abc', 'abc', bytes([209, 206, 250, 206]), bytes([15, 241, \n 206, 202, 254]), True, date(1997, 8, 29), time(9, 13, 39), datetime\n (2018, 11, 13, 13, 33, 26, 298420), 1234567, 3.14, Decimal(\n '31234567890123.141243449787580175325274'), uuid.UUID(\n '4fe34a93-e574-04cc-200a-353f0d1770b1')), ('', '', bytes([0, 1, 2, \n 3, 4]), bytes([0, 1, 2, 3, 4, 5]), False, date(1, 1, 1), time(0, 0,\n 0), datetime(1, 1, 1, 0, 0, 0, 0), -9223372036854775808, -1.79e+308,\n Decimal('0.000000000000000000000001'), uuid.UUID(\n '33f7504c-2bac-1b83-01d1-7434a7ba6a17')), (long_string,\n very_long_string, bytes(long_bytearray), bytes(very_long_bytearray),\n True, date(9999, 12, 31), time(23, 59, 59), datetime(9999, 12, 31, \n 23, 59, 59, 999990), 9223372036854775807, 1.79e+308, Decimal(\n '99999999999999.999999999999999999999999'), uuid.UUID(\n 'ffffffff-ffff-ffff-ffff-ffffffffffff'))]\n if diff_schema:\n p1 = [[typenameonly, schemaname] + params]\n else:\n p1 = [params]\n result_array = [tuple(row) for row in cursor.execute(\n f'exec {procname} ?', p1).fetchall()]\n for row, param in zip(result_array, params):\n if row != param:\n for r, p in zip(row, param):\n assert r == p\n params = []\n p1 = [params]\n if diff_schema:\n p1 = [[typenameonly, schemaname] + params]\n else:\n p1 = [params]\n result_array = cursor.execute(f'exec {procname} ?', p1).fetchall()\n assert result_array == params\n\n\[email protected](IS_FREEDTS, reason='FreeTDS does not support TVP')\ndef test_tvp(cursor: pyodbc.Cursor):\n _test_tvp(cursor, False)\n\n\[email protected](IS_FREEDTS, reason='FreeTDS does not support TVP')\ndef test_tvp_diffschema(cursor: pyodbc.Cursor):\n _test_tvp(cursor, True)\n\n\ndef get_sqlserver_version(cursor: pyodbc.Cursor):\n \"\"\"\n Returns the major version: 8-->2000, 9-->2005, 10-->2008\n \"\"\"\n cursor.execute(\"exec master..xp_msver 'ProductVersion'\")\n row = cursor.fetchone()\n return int(row.Character_Value.split('.', 1)[0])\n\n\n@lru_cache()\ndef _generate_str(length, encoding=None):\n \"\"\"\n Returns either a string or bytes, depending on whether encoding is provided,\n that is `length` elements long.\n\n If length is None, None is returned. 
This simplifies the tests by letting us put None into\n an array of other lengths and pass them here, moving the special case check into one place.\n \"\"\"\n if length is None:\n return None\n v = 'á'\n remaining = max(0, length - len(v))\n if remaining:\n seed = '0123456789-abcdefghijklmnopqrstuvwxyz-'\n if remaining <= len(seed):\n v += seed\n else:\n c = remaining + len(seed) - 1 // len(seed)\n v += seed * c\n if encoding:\n v = v.encode(encoding)\n v = v[:length]\n return v\n",
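The `_generate_str` helper at the end of the listing above builds fencepost-length test values by repeating a seed string and truncating to the requested length; the repeat count it computes only stays correct because of that final truncation. The following is a minimal, self-contained sketch of the same idea using an explicit ceiling division, not part of the original test file: the helper name make_test_str and the module-level SEED constant are illustrative assumptions.

from functools import lru_cache

# Illustrative seed; the original helper uses a similar digits-plus-letters string.
SEED = '0123456789-abcdefghijklmnopqrstuvwxyz-'

@lru_cache()
def make_test_str(length, encoding=None):
    """Return a str (or bytes, if `encoding` is given) of exactly `length` elements.

    Returns None when length is None, mirroring how the test helper lets None be
    mixed into a list of lengths without a special case at each call site.
    """
    if length is None:
        return None
    # Ceiling division: the smallest repeat count whose total length covers `length`.
    repeats = (length + len(SEED) - 1) // len(SEED)
    value = (SEED * max(repeats, 1))[:length]
    if encoding:
        # For the ASCII seed, each character encodes to one byte, so the byte
        # length equals the requested length after truncation.
        value = value.encode(encoding)[:length]
    return value

if __name__ == '__main__':
    for n in (0, 1, 255, 4096):
        assert len(make_test_str(n)) == n
    assert len(make_test_str(512, encoding='utf8')) == 512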
"step-5": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport os, uuid, re, sys\nfrom decimal import Decimal\nfrom datetime import date, time, datetime\nfrom functools import lru_cache\nfrom typing import Iterator\n\nimport pyodbc, pytest\n\n\n# WARNING: Wow Microsoft always manages to do the stupidest thing possible always trying to be\n# smarter than everyone. I worked with their APIs for since before \"OLE\" and it has always\n# been a nanny state. They won't read the UID and PWD from odbc.ini because it isn't secure.\n# Really? Less secure than what? The next hack someone is going to use. Do the straight\n# forward thing and explain how to secure it. it isn't their business how I deploy and secure.\n#\n# For every other DB we use a single default DSN but you can pass your own via an environment\n# variable. For SS, we can't just use a default DSN unless you want to go trusted. (Which is\n# more secure? No.) It'll be put into .bashrc most likely. Way to go. Now I'll go rename\n# all of the others to DB specific names instead of PYODBC_CNXNSTR. Hot garbage as usual.\n\nCNXNSTR = os.environ.get('PYODBC_SQLSERVER', 'DSN=pyodbc-sqlserver')\n\n\ndef connect(autocommit=False, attrs_before=None):\n return pyodbc.connect(CNXNSTR, autocommit=autocommit, attrs_before=attrs_before)\n\n\nDRIVER = connect().getinfo(pyodbc.SQL_DRIVER_NAME)\n\nIS_FREEDTS = bool(re.search('tsodbc', DRIVER, flags=re.IGNORECASE))\nIS_MSODBCSQL = bool(re.search(r'(msodbcsql|sqlncli|sqlsrv32\\.dll)', DRIVER, re.IGNORECASE))\n\n\ndef _get_sqlserver_year():\n \"\"\"\n Returns the release year of the current version of SQL Server, used to skip tests for\n features that are not supported. If the current DB is not SQL Server, 0 is returned.\n \"\"\"\n # We used to use the major version, but most documentation on the web refers to the year\n # (e.g. 
SQL Server 2019) so we'll use that for skipping tests that do not apply.\n if not IS_MSODBCSQL:\n return 0\n cnxn = connect()\n cursor = cnxn.cursor()\n row = cursor.execute(\"exec master..xp_msver 'ProductVersion'\").fetchone()\n major = row.Character_Value.split('.', 1)[0]\n return {\n # https://sqlserverbuilds.blogspot.com/\n '8': 2000, '9': 2005, '10': 2008, '11': 2012, '12': 2014,\n '13': 2016, '14': 2017, '15': 2019, '16': 2022\n }[major]\n\n\nSQLSERVER_YEAR = _get_sqlserver_year()\n\n\[email protected]()\ndef cursor() -> Iterator[pyodbc.Cursor]:\n cnxn = connect()\n cur = cnxn.cursor()\n\n cur.execute(\"drop table if exists t1\")\n cur.execute(\"drop table if exists t2\")\n cur.execute(\"drop table if exists t3\")\n cnxn.commit()\n\n yield cur\n\n if not cnxn.closed:\n cur.close()\n cnxn.close()\n\n\ndef test_text(cursor: pyodbc.Cursor):\n _test_vartype(cursor, 'text')\n\n\ndef test_varchar(cursor: pyodbc.Cursor):\n _test_vartype(cursor, 'varchar')\n\n\ndef test_nvarchar(cursor: pyodbc.Cursor):\n _test_vartype(cursor, 'nvarchar')\n\n\ndef test_varbinary(cursor: pyodbc.Cursor):\n _test_vartype(cursor, 'varbinary')\n\n\[email protected](SQLSERVER_YEAR < 2005, reason='(max) not supported until 2005')\ndef test_unicode_longmax(cursor: pyodbc.Cursor):\n # Issue 188:\tSegfault when fetching NVARCHAR(MAX) data over 511 bytes\n cursor.execute(\"select cast(replicate(N'x', 512) as nvarchar(max))\")\n\n\ndef test_char(cursor: pyodbc.Cursor):\n value = \"testing\"\n cursor.execute(\"create table t1(s char(7))\")\n cursor.execute(\"insert into t1 values(?)\", \"testing\")\n v = cursor.execute(\"select * from t1\").fetchone()[0]\n assert v == value\n\n\ndef test_int(cursor: pyodbc.Cursor):\n _test_scalar(cursor, 'int', [None, -1, 0, 1, 12345678])\n\n\ndef test_bigint(cursor: pyodbc.Cursor):\n _test_scalar(cursor, 'bigint', [None, -1, 0, 1, 0x123456789, 0x7FFFFFFF, 0xFFFFFFFF,\n 0x123456789])\n\n\ndef test_overflow_int(cursor: pyodbc.Cursor):\n # python allows integers of any size, bigger than an 8 byte int can contain\n input = 9999999999999999999999999999999999999\n cursor.execute(\"create table t1(d bigint)\")\n with pytest.raises(OverflowError):\n cursor.execute(\"insert into t1 values (?)\", input)\n result = cursor.execute(\"select * from t1\").fetchall()\n assert result == []\n\n\ndef test_float(cursor: pyodbc.Cursor):\n _test_scalar(cursor, 'float', [None, -200, -1, 0, 1, 1234.5, -200, .00012345])\n\n\ndef test_non_numeric_float(cursor: pyodbc.Cursor):\n cursor.execute(\"create table t1(d float)\")\n for input in (float('+Infinity'), float('-Infinity'), float('NaN')):\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.execute(\"insert into t1 values (?)\", input)\n\n\ndef test_drivers():\n p = pyodbc.drivers()\n assert isinstance(p, list)\n\n\ndef test_datasources():\n p = pyodbc.dataSources()\n assert isinstance(p, dict)\n\n\ndef test_getinfo_string():\n cnxn = connect()\n value = cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR)\n assert isinstance(value, str)\n\n\ndef test_getinfo_bool():\n cnxn = connect()\n value = cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES)\n assert isinstance(value, bool)\n\n\ndef test_getinfo_int():\n cnxn = connect()\n value = cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION)\n assert isinstance(value, int)\n\n\ndef test_getinfo_smallint():\n cnxn = connect()\n value = cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR)\n assert isinstance(value, int)\n\n\ndef test_no_fetch(cursor: pyodbc.Cursor):\n # Issue 89 with FreeTDS: Multiple selects (or catalog functions that 
issue selects) without\n # fetches seem to confuse the driver.\n cursor.execute('select 1')\n cursor.execute('select 1')\n cursor.execute('select 1')\n\n\ndef test_decode_meta(cursor: pyodbc.Cursor):\n \"\"\"\n Ensure column names with non-ASCII characters are converted using the configured encodings.\n \"\"\"\n # This is from GitHub issue #190\n cursor.execute(\"create table t1(a int)\")\n cursor.execute(\"insert into t1 values (1)\")\n cursor.execute('select a as \"Tipología\" from t1')\n assert cursor.description[0][0] == \"Tipología\"\n\n\ndef test_exc_integrity(cursor: pyodbc.Cursor):\n \"Make sure an IntegretyError is raised\"\n # This is really making sure we are properly encoding and comparing the SQLSTATEs.\n cursor.execute(\"create table t1(s1 varchar(10) primary key)\")\n cursor.execute(\"insert into t1 values ('one')\")\n with pytest.raises(pyodbc.IntegrityError):\n cursor.execute(\"insert into t1 values ('one')\")\n\n\ndef test_multiple_bindings(cursor: pyodbc.Cursor):\n \"More than one bind and select on a cursor\"\n cursor.execute(\"create table t1(n int)\")\n cursor.execute(\"insert into t1 values (?)\", 1)\n cursor.execute(\"insert into t1 values (?)\", 2)\n cursor.execute(\"insert into t1 values (?)\", 3)\n for _ in range(3):\n cursor.execute(\"select n from t1 where n < ?\", 10)\n cursor.execute(\"select n from t1 where n < 3\")\n\n\ndef test_different_bindings(cursor: pyodbc.Cursor):\n cursor.execute(\"create table t1(n int)\")\n cursor.execute(\"create table t2(d datetime)\")\n cursor.execute(\"insert into t1 values (?)\", 1)\n cursor.execute(\"insert into t2 values (?)\", datetime.now())\n\n\nSMALL_FENCEPOST_SIZES = [None, 0, 1, 255, 256, 510, 511, 512, 1023, 1024, 2047, 2048, 4000]\nLARGE_FENCEPOST_SIZES = SMALL_FENCEPOST_SIZES + [4095, 4096, 4097, 10 * 1024, 20 * 1024]\n\n\ndef _test_vartype(cursor: pyodbc.Cursor, datatype):\n\n if datatype == 'text':\n lengths = LARGE_FENCEPOST_SIZES\n else:\n lengths = SMALL_FENCEPOST_SIZES\n\n if datatype == 'text':\n cursor.execute(f\"create table t1(c1 {datatype})\")\n else:\n maxlen = lengths[-1]\n cursor.execute(f\"create table t1(c1 {datatype}({maxlen}))\")\n\n for length in lengths:\n cursor.execute(\"delete from t1\")\n\n encoding = (datatype in ('blob', 'varbinary')) and 'utf8' or None\n value = _generate_str(length, encoding=encoding)\n\n try:\n cursor.execute(\"insert into t1 values(?)\", value)\n except pyodbc.Error as ex:\n msg = f'{datatype} insert failed: length={length} len={len(value)}'\n raise Exception(msg) from ex\n\n v = cursor.execute(\"select * from t1\").fetchone()[0]\n assert v == value\n\n\ndef _test_scalar(cursor: pyodbc.Cursor, datatype, values):\n \"\"\"\n A simple test wrapper for types that are identical when written and read.\n \"\"\"\n cursor.execute(f\"create table t1(c1 {datatype})\")\n for value in values:\n cursor.execute(\"delete from t1\")\n cursor.execute(\"insert into t1 values (?)\", value)\n v = cursor.execute(\"select c1 from t1\").fetchone()[0]\n assert v == value\n\n\ndef test_noscan(cursor: pyodbc.Cursor):\n assert cursor.noscan is False\n cursor.noscan = True\n assert cursor.noscan is True\n\n\ndef test_nonnative_uuid(cursor: pyodbc.Cursor):\n # The default is False meaning we should return a string. 
Note that\n # SQL Server seems to always return uppercase.\n value = uuid.uuid4()\n cursor.execute(\"create table t1(n uniqueidentifier)\")\n cursor.execute(\"insert into t1 values (?)\", value)\n\n pyodbc.native_uuid = False\n result = cursor.execute(\"select n from t1\").fetchval()\n assert isinstance(result, str)\n assert result == str(value).upper()\n pyodbc.native_uuid = True\n\n\ndef test_native_uuid(cursor: pyodbc.Cursor):\n # When true, we should return a uuid.UUID object.\n value = uuid.uuid4()\n cursor.execute(\"create table t1(n uniqueidentifier)\")\n cursor.execute(\"insert into t1 values (?)\", value)\n\n pyodbc.native_uuid = True\n result = cursor.execute(\"select n from t1\").fetchval()\n assert isinstance(result, uuid.UUID)\n assert value == result\n\n\ndef test_nextset(cursor: pyodbc.Cursor):\n cursor.execute(\"create table t1(i int)\")\n for i in range(4):\n cursor.execute(\"insert into t1(i) values(?)\", i)\n\n cursor.execute(\n \"\"\"\n select i from t1 where i < 2 order by i;\n select i from t1 where i >= 2 order by i\n \"\"\")\n\n for i, row in enumerate(cursor):\n assert i == row.i\n\n assert cursor.nextset()\n\n for i, row in enumerate(cursor):\n assert i + 2 == row.i\n\n\[email protected](IS_FREEDTS, reason='https://github.com/FreeTDS/freetds/issues/230')\ndef test_nextset_with_raiserror(cursor: pyodbc.Cursor):\n cursor.execute(\"select i = 1; RAISERROR('c', 16, 1);\")\n row = next(cursor)\n assert 1 == row.i\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.nextset()\n\n\ndef test_fixed_unicode(cursor: pyodbc.Cursor):\n value = \"t\\xebsting\"\n cursor.execute(\"create table t1(s nchar(7))\")\n cursor.execute(\"insert into t1 values(?)\", \"t\\xebsting\")\n v = cursor.execute(\"select * from t1\").fetchone()[0]\n assert isinstance(v, str)\n assert len(v) == len(value)\n # If we alloc'd wrong, the test below might work because of an embedded NULL\n assert v == value\n\n\ndef test_chinese(cursor: pyodbc.Cursor):\n v = '我的'\n cursor.execute(\"SELECT N'我的' AS [Name]\")\n row = cursor.fetchone()\n assert row[0] == v\n\n cursor.execute(\"SELECT N'我的' AS [Name]\")\n rows = cursor.fetchall()\n assert rows[0][0] == v\n\n\ndef test_bit(cursor: pyodbc.Cursor):\n value = True\n cursor.execute(\"create table t1(b bit)\")\n cursor.execute(\"insert into t1 values (?)\", value)\n v = cursor.execute(\"select b from t1\").fetchone()[0]\n assert isinstance(v, bool)\n assert v == value\n\n\ndef test_decimal(cursor: pyodbc.Cursor):\n # From test provided by planders (thanks!) in Issue 91\n\n for (precision, scale, negative) in [\n (1, 0, False), (1, 0, True), (6, 0, False), (6, 2, False), (6, 4, True),\n (6, 6, True), (38, 0, False), (38, 10, False), (38, 38, False), (38, 0, True),\n (38, 10, True), (38, 38, True)]:\n\n try:\n cursor.execute(\"drop table t1\")\n except:\n pass\n\n cursor.execute(f\"create table t1(d decimal({precision}, {scale}))\")\n\n # Construct a decimal that uses the maximum precision and scale.\n sign = negative and '-' or ''\n before = '9' * (precision - scale)\n after = scale and ('.' 
+ '9' * scale) or ''\n decStr = f'{sign}{before}{after}'\n value = Decimal(decStr)\n\n cursor.execute(\"insert into t1 values(?)\", value)\n\n v = cursor.execute(\"select d from t1\").fetchone()[0]\n assert v == value\n\n\ndef test_decimal_e(cursor: pyodbc.Cursor):\n \"\"\"Ensure exponential notation decimals are properly handled\"\"\"\n value = Decimal((0, (1, 2, 3), 5)) # prints as 1.23E+7\n cursor.execute(\"create table t1(d decimal(10, 2))\")\n cursor.execute(\"insert into t1 values (?)\", value)\n result = cursor.execute(\"select * from t1\").fetchone()[0]\n assert result == value\n\n\ndef test_subquery_params(cursor: pyodbc.Cursor):\n \"\"\"Ensure parameter markers work in a subquery\"\"\"\n cursor.execute(\"create table t1(id integer, s varchar(20))\")\n cursor.execute(\"insert into t1 values (?,?)\", 1, 'test')\n row = cursor.execute(\"\"\"\n select x.id\n from (\n select id\n from t1\n where s = ?\n and id between ? and ?\n ) x\n \"\"\", 'test', 1, 10).fetchone()\n assert row is not None\n assert row[0] == 1\n\n\ndef test_close_cnxn():\n \"\"\"Make sure using a Cursor after closing its connection doesn't crash.\"\"\"\n\n cnxn = connect()\n cursor = cnxn.cursor()\n\n cursor.execute(\"drop table if exists t1\")\n cursor.execute(\"create table t1(id integer, s varchar(20))\")\n cursor.execute(\"insert into t1 values (?,?)\", 1, 'test')\n cursor.execute(\"select * from t1\")\n\n cnxn.close()\n\n # Now that the connection is closed, we expect an exception. (If the code attempts to use\n # the HSTMT, we'll get an access violation instead.)\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.execute(\"select * from t1\")\n\n\ndef test_empty_string(cursor: pyodbc.Cursor):\n cursor.execute(\"create table t1(s varchar(20))\")\n cursor.execute(\"insert into t1 values(?)\", \"\")\n\n\ndef test_empty_string_encoding():\n cnxn = connect()\n cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='shift_jis')\n value = \"\"\n cursor = cnxn.cursor()\n cursor.execute(\"create table t1(s varchar(20))\")\n cursor.execute(\"insert into t1 values(?)\", value)\n v = cursor.execute(\"select * from t1\").fetchone()[0]\n assert v == value\n\n\ndef test_fixed_str(cursor: pyodbc.Cursor):\n value = \"testing\"\n cursor.execute(\"create table t1(s char(7))\")\n cursor.execute(\"insert into t1 values(?)\", value)\n v = cursor.execute(\"select * from t1\").fetchone()[0]\n assert isinstance(v, str)\n assert len(v) == len(value)\n # If we alloc'd wrong, the test below might work because of an embedded NULL\n assert v == value\n\n\ndef test_empty_unicode(cursor: pyodbc.Cursor):\n cursor.execute(\"create table t1(s nvarchar(20))\")\n cursor.execute(\"insert into t1 values(?)\", \"\")\n\n\ndef test_empty_unicode_encoding():\n cnxn = connect()\n cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='shift_jis')\n value = \"\"\n cursor = cnxn.cursor()\n cursor.execute(\"create table t1(s nvarchar(20))\")\n cursor.execute(\"insert into t1 values(?)\", value)\n v = cursor.execute(\"select * from t1\").fetchone()[0]\n assert v == value\n\n\ndef test_negative_row_index(cursor: pyodbc.Cursor):\n cursor.execute(\"create table t1(s varchar(20))\")\n cursor.execute(\"insert into t1 values(?)\", \"1\")\n row = cursor.execute(\"select * from t1\").fetchone()\n assert row[0] == \"1\"\n assert row[-1] == \"1\"\n\n\ndef test_version():\n assert 3 == len(pyodbc.version.split('.')) # 1.3.1 etc.\n\n\[email protected](IS_MSODBCSQL and SQLSERVER_YEAR < 2008,\n reason='Date not supported until 2008?')\ndef test_date(cursor: pyodbc.Cursor):\n value = 
date.today()\n\n cursor.execute(\"create table t1(d date)\")\n cursor.execute(\"insert into t1 values (?)\", value)\n\n result = cursor.execute(\"select d from t1\").fetchone()[0]\n assert isinstance(result, date)\n assert value == result\n\n\[email protected](IS_MSODBCSQL and SQLSERVER_YEAR < 2008,\n reason='Time not supported until 2008?')\ndef test_time(cursor: pyodbc.Cursor):\n value = datetime.now().time()\n\n # We aren't yet writing values using the new extended time type so the value written to the\n # database is only down to the second.\n value = value.replace(microsecond=0)\n\n cursor.execute(\"create table t1(t time)\")\n cursor.execute(\"insert into t1 values (?)\", value)\n\n result = cursor.execute(\"select t from t1\").fetchone()[0]\n assert isinstance(result, time)\n assert value == result\n\n\ndef test_datetime(cursor: pyodbc.Cursor):\n value = datetime(2007, 1, 15, 3, 4, 5)\n\n cursor.execute(\"create table t1(dt datetime)\")\n cursor.execute(\"insert into t1 values (?)\", value)\n\n result = cursor.execute(\"select dt from t1\").fetchone()[0]\n assert isinstance(result, datetime)\n assert value == result\n\n\ndef test_datetime_fraction(cursor: pyodbc.Cursor):\n # SQL Server supports milliseconds, but Python's datetime supports nanoseconds, so the most\n # granular datetime supported is xxx000.\n\n value = datetime(2007, 1, 15, 3, 4, 5, 123000)\n\n cursor.execute(\"create table t1(dt datetime)\")\n cursor.execute(\"insert into t1 values (?)\", value)\n\n result = cursor.execute(\"select dt from t1\").fetchone()[0]\n assert isinstance(result, datetime)\n assert value == result\n\n\ndef test_datetime_fraction_rounded(cursor: pyodbc.Cursor):\n # SQL Server supports milliseconds, but Python's datetime supports nanoseconds. pyodbc\n # rounds down to what the database supports.\n\n full = datetime(2007, 1, 15, 3, 4, 5, 123456)\n rounded = datetime(2007, 1, 15, 3, 4, 5, 123000)\n\n cursor.execute(\"create table t1(dt datetime)\")\n cursor.execute(\"insert into t1 values (?)\", full)\n\n result = cursor.execute(\"select dt from t1\").fetchone()[0]\n assert isinstance(result, datetime)\n assert rounded == result\n\n\ndef test_datetime2(cursor: pyodbc.Cursor):\n value = datetime(2007, 1, 15, 3, 4, 5)\n\n cursor.execute(\"create table t1(dt datetime2)\")\n cursor.execute(\"insert into t1 values (?)\", value)\n\n result = cursor.execute(\"select dt from t1\").fetchone()[0]\n assert isinstance(result, datetime)\n assert value == result\n\n\ndef test_sp_results(cursor: pyodbc.Cursor):\n cursor.execute(\n \"\"\"\n Create procedure proc1\n AS\n select top 10 name, id, xtype, refdate\n from sysobjects\n \"\"\")\n rows = cursor.execute(\"exec proc1\").fetchall()\n assert isinstance(rows, list)\n assert len(rows) == 10 # there has to be at least 10 items in sysobjects\n assert isinstance(rows[0].refdate, datetime)\n\n\ndef test_sp_results_from_temp(cursor: pyodbc.Cursor):\n\n # Note: I've used \"set nocount on\" so that we don't get the number of rows deleted from\n # #tmptable. 
If you don't do this, you'd need to call nextset() once to skip it.\n\n cursor.execute(\n \"\"\"\n Create procedure proc1\n AS\n set nocount on\n select top 10 name, id, xtype, refdate\n into #tmptable\n from sysobjects\n\n select * from #tmptable\n \"\"\")\n cursor.execute(\"exec proc1\")\n assert cursor.description is not None\n assert len(cursor.description) == 4\n\n rows = cursor.fetchall()\n assert isinstance(rows, list)\n assert len(rows) == 10 # there has to be at least 10 items in sysobjects\n assert isinstance(rows[0].refdate, datetime)\n\n\ndef test_sp_results_from_vartbl(cursor: pyodbc.Cursor):\n cursor.execute(\n \"\"\"\n Create procedure proc1\n AS\n set nocount on\n declare @tmptbl table(name varchar(100), id int, xtype varchar(4), refdate datetime)\n\n insert into @tmptbl\n select top 10 name, id, xtype, refdate\n from sysobjects\n\n select * from @tmptbl\n \"\"\")\n cursor.execute(\"exec proc1\")\n rows = cursor.fetchall()\n assert isinstance(rows, list)\n assert len(rows) == 10 # there has to be at least 10 items in sysobjects\n assert isinstance(rows[0].refdate, datetime)\n\n\ndef test_sp_with_dates(cursor: pyodbc.Cursor):\n # Reported in the forums that passing two datetimes to a stored procedure doesn't work.\n cursor.execute(\n \"\"\"\n if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]')\n and OBJECTPROPERTY(id, N'IsProcedure') = 1)\n drop procedure [dbo].[test_sp]\n \"\"\")\n cursor.execute(\n \"\"\"\n create procedure test_sp(@d1 datetime, @d2 datetime)\n AS\n declare @d as int\n set @d = datediff(year, @d1, @d2)\n select @d\n \"\"\")\n cursor.execute(\"exec test_sp ?, ?\", datetime.now(), datetime.now())\n rows = cursor.fetchall()\n assert rows is not None\n assert rows[0][0] == 0 # 0 years apart\n\n\ndef test_sp_with_none(cursor: pyodbc.Cursor):\n # Reported in the forums that passing None caused an error.\n cursor.execute(\n \"\"\"\n if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]')\n and OBJECTPROPERTY(id, N'IsProcedure') = 1)\n drop procedure [dbo].[test_sp]\n \"\"\")\n cursor.execute(\n \"\"\"\n create procedure test_sp(@x varchar(20))\n AS\n declare @y varchar(20)\n set @y = @x\n select @y\n \"\"\")\n cursor.execute(\"exec test_sp ?\", None)\n rows = cursor.fetchall()\n assert rows is not None\n assert rows[0][0] is None # 0 years apart\n\n\n#\n# rowcount\n#\n\n\ndef test_rowcount_delete(cursor: pyodbc.Cursor):\n assert cursor.rowcount == -1\n cursor.execute(\"create table t1(i int)\")\n count = 4\n for i in range(count):\n cursor.execute(\"insert into t1 values (?)\", i)\n cursor.execute(\"delete from t1\")\n assert cursor.rowcount == count\n\n\ndef test_rowcount_nodata(cursor: pyodbc.Cursor):\n \"\"\"\n This represents a different code path than a delete that deleted something.\n\n The return value is SQL_NO_DATA and code after it was causing an error. We could use\n SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount\n code. On the other hand, we could hardcode a zero return value.\n \"\"\"\n cursor.execute(\"create table t1(i int)\")\n # This is a different code path internally.\n cursor.execute(\"delete from t1\")\n assert cursor.rowcount == 0\n\n\ndef test_rowcount_select(cursor: pyodbc.Cursor):\n \"\"\"\n Ensure Cursor.rowcount is set properly after a select statement.\n\n pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount, but SQL Server 2005\n returns -1 after a select statement, so we'll test for that behavior. 
This is valid\n behavior according to the DB API specification, but people don't seem to like it.\n \"\"\"\n cursor.execute(\"create table t1(i int)\")\n count = 4\n for i in range(count):\n cursor.execute(\"insert into t1 values (?)\", i)\n cursor.execute(\"select * from t1\")\n assert cursor.rowcount == -1\n\n rows = cursor.fetchall()\n assert len(rows) == count\n assert cursor.rowcount == -1\n\n\ndef test_rowcount_reset(cursor: pyodbc.Cursor):\n \"Ensure rowcount is reset after DDL\"\n cursor.execute(\"create table t1(i int)\")\n count = 4\n for i in range(count):\n cursor.execute(\"insert into t1 values (?)\", i)\n assert cursor.rowcount == 1\n\n cursor.execute(\"create table t2(i int)\")\n ddl_rowcount = (0 if IS_FREEDTS else -1)\n assert cursor.rowcount == ddl_rowcount\n\n\ndef test_retcursor_delete(cursor: pyodbc.Cursor):\n cursor.execute(\"create table t1(i int)\")\n cursor.execute(\"insert into t1 values (1)\")\n v = cursor.execute(\"delete from t1\")\n assert v == cursor\n\n\ndef test_retcursor_nodata(cursor: pyodbc.Cursor):\n \"\"\"\n This represents a different code path than a delete that deleted something.\n\n The return value is SQL_NO_DATA and code after it was causing an error. We could use\n SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount\n code.\n \"\"\"\n cursor.execute(\"create table t1(i int)\")\n # This is a different code path internally.\n v = cursor.execute(\"delete from t1\")\n assert v == cursor\n\n\ndef test_retcursor_select(cursor: pyodbc.Cursor):\n cursor.execute(\"create table t1(i int)\")\n cursor.execute(\"insert into t1 values (1)\")\n v = cursor.execute(\"select * from t1\")\n assert v == cursor\n\n\ndef table_with_spaces(cursor: pyodbc.Cursor):\n \"Ensure we can select using [x z] syntax\"\n\n try:\n cursor.execute(\"create table [test one](int n)\")\n cursor.execute(\"insert into [test one] values(1)\")\n cursor.execute(\"select * from [test one]\")\n v = cursor.fetchone()[0]\n assert v == 1\n finally:\n cursor.rollback()\n\n\ndef test_lower_case():\n \"Ensure pyodbc.lowercase forces returned column names to lowercase.\"\n try:\n pyodbc.lowercase = True\n cnxn = connect()\n cursor = cnxn.cursor()\n\n cursor.execute(\"create table t1(Abc int, dEf int)\")\n cursor.execute(\"select * from t1\")\n\n names = [t[0] for t in cursor.description]\n names.sort()\n\n assert names == [\"abc\", \"def\"]\n finally:\n # Put it back so other tests don't fail.\n pyodbc.lowercase = False\n\n\ndef test_row_description(cursor: pyodbc.Cursor):\n \"\"\"\n Ensure Cursor.description is accessible as Row.cursor_description.\n \"\"\"\n cursor.execute(\"create table t1(a int, b char(3))\")\n cursor.execute(\"insert into t1 values(1, 'abc')\")\n row = cursor.execute(\"select * from t1\").fetchone()\n assert cursor.description == row.cursor_description\n\n\ndef test_temp_select(cursor: pyodbc.Cursor):\n # A project was failing to create temporary tables via select into.\n cursor.execute(\"create table t1(s char(7))\")\n cursor.execute(\"insert into t1 values(?)\", \"testing\")\n v = cursor.execute(\"select * from t1\").fetchone()[0]\n assert isinstance(v, str)\n assert v == \"testing\"\n\n cursor.execute(\"select s into t2 from t1\")\n v = cursor.execute(\"select * from t1\").fetchone()[0]\n assert isinstance(v, str)\n assert v == \"testing\"\n\n\ndef test_executemany(cursor: pyodbc.Cursor):\n cursor.execute(\"create table t1(a int, b varchar(10))\")\n\n params = [(i, str(i)) for i in range(1, 6)]\n\n cursor.executemany(\"insert into t1(a, 
b) values (?,?)\", params)\n\n count = cursor.execute(\"select count(*) from t1\").fetchone()[0]\n assert count == len(params)\n\n cursor.execute(\"select a, b from t1 order by a\")\n rows = cursor.fetchall()\n assert count == len(rows)\n\n for param, row in zip(params, rows):\n assert param[0] == row[0]\n assert param[1] == row[1]\n\n\ndef test_executemany_one(cursor: pyodbc.Cursor):\n \"Pass executemany a single sequence\"\n cursor.execute(\"create table t1(a int, b varchar(10))\")\n\n params = [(1, \"test\")]\n\n cursor.executemany(\"insert into t1(a, b) values (?,?)\", params)\n\n count = cursor.execute(\"select count(*) from t1\").fetchone()[0]\n assert count == len(params)\n\n cursor.execute(\"select a, b from t1 order by a\")\n rows = cursor.fetchall()\n assert count == len(rows)\n\n for param, row in zip(params, rows):\n assert param[0] == row[0]\n assert param[1] == row[1]\n\n\ndef test_executemany_dae_0(cursor: pyodbc.Cursor):\n \"\"\"\n DAE for 0-length value\n \"\"\"\n cursor.execute(\"create table t1(a nvarchar(max))\")\n\n cursor.fast_executemany = True\n cursor.executemany(\"insert into t1(a) values(?)\", [['']])\n\n assert cursor.execute(\"select a from t1\").fetchone()[0] == ''\n\n cursor.fast_executemany = False\n\n\ndef test_executemany_failure(cursor: pyodbc.Cursor):\n \"\"\"\n Ensure that an exception is raised if one query in an executemany fails.\n \"\"\"\n cursor.execute(\"create table t1(a int, b varchar(10))\")\n\n params = [(1, 'good'),\n ('error', 'not an int'),\n (3, 'good')]\n\n with pytest.raises(pyodbc.Error):\n cursor.executemany(\"insert into t1(a, b) value (?, ?)\", params)\n\n\ndef test_row_slicing(cursor: pyodbc.Cursor):\n cursor.execute(\"create table t1(a int, b int, c int, d int)\")\n cursor.execute(\"insert into t1 values(1,2,3,4)\")\n\n row = cursor.execute(\"select * from t1\").fetchone()\n\n result = row[:]\n assert result is row\n\n result = row[:-1]\n assert result == (1, 2, 3)\n\n result = row[0:4]\n assert result is row\n\n\ndef test_row_repr(cursor: pyodbc.Cursor):\n cursor.execute(\"create table t1(a int, b int, c int, d varchar(50))\")\n cursor.execute(\"insert into t1 values(1,2,3,'four')\")\n\n row = cursor.execute(\"select * from t1\").fetchone()\n\n result = str(row)\n assert result == \"(1, 2, 3, 'four')\"\n\n result = str(row[:-1])\n assert result == \"(1, 2, 3)\"\n\n result = str(row[:1])\n assert result == \"(1,)\"\n\n\ndef test_concatenation(cursor: pyodbc.Cursor):\n v2 = '0123456789' * 30\n v3 = '9876543210' * 30\n\n cursor.execute(\"create table t1(c1 int identity(1, 1), c2 varchar(300), c3 varchar(300))\")\n cursor.execute(\"insert into t1(c2, c3) values (?,?)\", v2, v3)\n\n row = cursor.execute(\"select c2, c3, c2 + c3 as both from t1\").fetchone()\n\n assert row.both == v2 + v3\n\n\ndef test_view_select(cursor: pyodbc.Cursor):\n # Reported in forum: Can't select from a view? 
I think I do this a lot, but another test\n # never hurts.\n\n # Create a table (t1) with 3 rows and a view (t2) into it.\n cursor.execute(\"create table t1(c1 int identity(1, 1), c2 varchar(50))\")\n for i in range(3):\n cursor.execute(\"insert into t1(c2) values (?)\", f\"string{i}\")\n cursor.execute(\"create view t2 as select * from t1\")\n\n # Select from the view\n cursor.execute(\"select * from t2\")\n rows = cursor.fetchall()\n assert rows is not None\n assert len(rows) == 3\n\n\ndef test_autocommit():\n cnxn = connect()\n assert cnxn.autocommit is False\n cnxn = None\n\n cnxn = connect(autocommit=True)\n assert cnxn.autocommit is True\n cnxn.autocommit = False\n assert cnxn.autocommit is False\n\n\ndef test_sqlserver_callproc(cursor: pyodbc.Cursor):\n try:\n cursor.execute(\"drop procedure pyodbctest\")\n cursor.commit()\n except:\n pass\n\n cursor.execute(\"create table t1(s varchar(10))\")\n cursor.execute(\"insert into t1 values(?)\", \"testing\")\n\n cursor.execute(\"\"\"\n create procedure pyodbctest @var1 varchar(32)\n as\n begin\n select s from t1\n return\n end\n \"\"\")\n\n cursor.execute(\"exec pyodbctest 'hi'\")\n\n\ndef test_skip(cursor: pyodbc.Cursor):\n # Insert 1, 2, and 3. Fetch 1, skip 2, fetch 3.\n\n cursor.execute(\"create table t1(id int)\")\n for i in range(1, 5):\n cursor.execute(\"insert into t1 values(?)\", i)\n cursor.execute(\"select id from t1 order by id\")\n assert cursor.fetchone()[0] == 1\n cursor.skip(2)\n assert cursor.fetchone()[0] == 4\n\n\ndef test_timeout():\n cnxn = connect()\n assert cnxn.timeout == 0 # defaults to zero (off)\n\n cnxn.timeout = 30\n assert cnxn.timeout == 30\n\n cnxn.timeout = 0\n assert cnxn.timeout == 0\n\n\ndef test_sets_execute(cursor: pyodbc.Cursor):\n # Only lists and tuples are allowed.\n cursor.execute(\"create table t1 (word varchar (100))\")\n\n words = {'a', 'b', 'c'}\n\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.execute(\"insert into t1 (word) values (?)\", words)\n\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.executemany(\"insert into t1 (word) values (?)\", words)\n\n\ndef test_row_execute(cursor: pyodbc.Cursor):\n \"Ensure we can use a Row object as a parameter to execute\"\n cursor.execute(\"create table t1(n int, s varchar(10))\")\n cursor.execute(\"insert into t1 values (1, 'a')\")\n row = cursor.execute(\"select n, s from t1\").fetchone()\n assert row\n\n cursor.execute(\"create table t2(n int, s varchar(10))\")\n cursor.execute(\"insert into t2 values (?, ?)\", row)\n\n\ndef test_row_executemany(cursor: pyodbc.Cursor):\n \"Ensure we can use a Row object as a parameter to executemany\"\n cursor.execute(\"create table t1(n int, s varchar(10))\")\n\n for i in range(3):\n cursor.execute(\"insert into t1 values (?, ?)\", i, chr(ord('a') + i))\n\n rows = cursor.execute(\"select n, s from t1\").fetchall()\n assert len(rows) != 0\n\n cursor.execute(\"create table t2(n int, s varchar(10))\")\n cursor.executemany(\"insert into t2 values (?, ?)\", rows)\n\n\ndef test_description(cursor: pyodbc.Cursor):\n \"Ensure cursor.description is correct\"\n\n cursor.execute(\"create table t1(n int, s varchar(8), d decimal(5,2))\")\n cursor.execute(\"insert into t1 values (1, 'abc', '1.23')\")\n cursor.execute(\"select * from t1\")\n\n # (I'm not sure the precision of an int is constant across different versions, bits, so I'm\n # hand checking the items I do know.\n\n # int\n t = cursor.description[0]\n assert t[0] == 'n'\n assert t[1] == int\n assert t[5] == 0 # scale\n assert t[6] is True # nullable\n\n 
# varchar(8)\n t = cursor.description[1]\n assert t[0] == 's'\n assert t[1] == str\n assert t[4] == 8 # precision\n assert t[5] == 0 # scale\n assert t[6] is True # nullable\n\n # decimal(5, 2)\n t = cursor.description[2]\n assert t[0] == 'd'\n assert t[1] == Decimal\n assert t[4] == 5 # precision\n assert t[5] == 2 # scale\n assert t[6] is True # nullable\n\n\ndef test_cursor_messages_with_print(cursor: pyodbc.Cursor):\n \"\"\"\n Ensure the Cursor.messages attribute is handled correctly with a simple PRINT statement.\n \"\"\"\n assert not cursor.messages\n\n # SQL Server PRINT statements are never more than 8000 characters\n # https://docs.microsoft.com/en-us/sql/t-sql/language-elements/print-transact-sql#remarks\n for msg in ('hello world', 'ABCDEFGHIJ' * 800):\n cursor.execute(f\"PRINT '{msg}'\")\n messages = cursor.messages\n assert isinstance(messages, list)\n assert len(messages) == 1\n assert isinstance(messages[0], tuple)\n assert len(messages[0]) == 2\n assert isinstance(messages[0][0], str)\n assert isinstance(messages[0][1], str)\n assert '[01000] (0)' == messages[0][0]\n assert messages[0][1].endswith(msg)\n\n\ndef test_cursor_messages_with_stored_proc(cursor: pyodbc.Cursor):\n \"\"\"\n Complex scenario to test the Cursor.messages attribute.\n \"\"\"\n cursor.execute(\"\"\"\n create or alter procedure test_cursor_messages as\n begin\n set nocount on;\n print 'Message 1a';\n print 'Message 1b';\n select N'Field 1a' AS F UNION ALL SELECT N'Field 1b';\n select N'Field 2a' AS F UNION ALL SELECT N'Field 2b';\n print 'Message 2a';\n print 'Message 2b';\n end\n \"\"\")\n\n # The messages will look like:\n #\n # [Microsoft][ODBC Driver 18 for SQL Server][SQL Server]Message 1a\n\n # result set 1: messages, rows\n cursor.execute(\"exec test_cursor_messages\")\n vals = [row[0] for row in cursor.fetchall()]\n assert vals == ['Field 1a', 'Field 1b']\n msgs = [\n re.search(r'Message \\d[ab]$', m[1]).group(0)\n for m in cursor.messages\n ]\n assert msgs == ['Message 1a', 'Message 1b']\n\n # result set 2: rows, no messages\n assert cursor.nextset()\n vals = [row[0] for row in cursor.fetchall()]\n assert vals == ['Field 2a', 'Field 2b']\n assert not cursor.messages\n\n # result set 3: messages, no rows\n assert cursor.nextset()\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.fetchall()\n msgs = [\n re.search(r'Message \\d[ab]$', m[1]).group(0)\n for m in cursor.messages\n ]\n assert msgs == ['Message 2a', 'Message 2b']\n\n # result set 4: no rows, no messages\n assert not cursor.nextset()\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.fetchall()\n assert not cursor.messages\n\n\ndef test_none_param(cursor: pyodbc.Cursor):\n \"Ensure None can be used for params other than the first\"\n # Some driver/db versions would fail if NULL was not the first parameter because\n # SQLDescribeParam (only used with NULL) could not be used after the first call to\n # SQLBindParameter. 
This means None always worked for the first column, but did not work\n # for later columns.\n #\n # If SQLDescribeParam doesn't work, pyodbc would use VARCHAR which almost always worked.\n # However, binary/varbinary won't allow an implicit conversion.\n\n cursor.execute(\"create table t1(n int, blob varbinary(max))\")\n cursor.execute(\"insert into t1 values (1, newid())\")\n row = cursor.execute(\"select * from t1\").fetchone()\n assert row.n == 1\n assert isinstance(row.blob, bytes)\n\n sql = \"update t1 set n=?, blob=?\"\n try:\n cursor.execute(sql, 2, None)\n except pyodbc.DataError:\n if IS_FREEDTS:\n # cnxn.getinfo(pyodbc.SQL_DESCRIBE_PARAMETER) returns False for FreeTDS, so pyodbc\n # can't call SQLDescribeParam to get the correct parameter type. This can lead to\n # errors being returned from SQL Server when sp_prepexec is called, e.g., \"Implicit\n # conversion from data type varchar to varbinary(max) is not allowed.\"\n #\n # So at least verify that the user can manually specify the parameter type\n cursor.setinputsizes([(), (pyodbc.SQL_VARBINARY, None, None)])\n cursor.execute(sql, 2, None)\n else:\n raise\n row = cursor.execute(\"select * from t1\").fetchone()\n assert row.n == 2\n assert row.blob is None\n\n\ndef test_output_conversion():\n def convert1(value):\n # The value is the raw bytes (as a bytes object) read from the\n # database. We'll simply add an X at the beginning at the end.\n return 'X' + value.decode('latin1') + 'X'\n\n def convert2(value):\n # Same as above, but add a Y at the beginning at the end.\n return 'Y' + value.decode('latin1') + 'Y'\n\n cnxn = connect()\n cursor = cnxn.cursor()\n\n cursor.execute(\"create table t1(n int, v varchar(10))\")\n cursor.execute(\"insert into t1 values (1, '123.45')\")\n\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)\n value = cursor.execute(\"select v from t1\").fetchone()[0]\n assert value == 'X123.45X'\n\n # Clear all conversions and try again. 
There should be no Xs this time.\n cnxn.clear_output_converters()\n value = cursor.execute(\"select v from t1\").fetchone()[0]\n assert value == '123.45'\n\n # Same but clear using remove_output_converter.\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)\n value = cursor.execute(\"select v from t1\").fetchone()[0]\n assert value == 'X123.45X'\n\n cnxn.remove_output_converter(pyodbc.SQL_VARCHAR)\n value = cursor.execute(\"select v from t1\").fetchone()[0]\n assert value == '123.45'\n\n # Clear via add_output_converter, passing None for the converter function.\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)\n value = cursor.execute(\"select v from t1\").fetchone()[0]\n assert value == 'X123.45X'\n\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, None)\n value = cursor.execute(\"select v from t1\").fetchone()[0]\n assert value == '123.45'\n\n # retrieve and temporarily replace converter (get_output_converter)\n #\n # case_1: converter already registered\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)\n value = cursor.execute(\"select v from t1\").fetchone()[0]\n assert value == 'X123.45X'\n prev_converter = cnxn.get_output_converter(pyodbc.SQL_VARCHAR)\n assert prev_converter is not None\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert2)\n value = cursor.execute(\"select v from t1\").fetchone()[0]\n assert value == 'Y123.45Y'\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, prev_converter)\n value = cursor.execute(\"select v from t1\").fetchone()[0]\n assert value == 'X123.45X'\n #\n # case_2: no converter already registered\n cnxn.clear_output_converters()\n value = cursor.execute(\"select v from t1\").fetchone()[0]\n assert value == '123.45'\n prev_converter = cnxn.get_output_converter(pyodbc.SQL_VARCHAR)\n assert prev_converter is None\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert2)\n value = cursor.execute(\"select v from t1\").fetchone()[0]\n assert value == 'Y123.45Y'\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, prev_converter)\n value = cursor.execute(\"select v from t1\").fetchone()[0]\n assert value == '123.45'\n\n\ndef test_too_large(cursor: pyodbc.Cursor):\n \"\"\"Ensure error raised if insert fails due to truncation\"\"\"\n value = 'x' * 1000\n cursor.execute(\"create table t1(s varchar(800))\")\n\n with pytest.raises(pyodbc.Error):\n cursor.execute(\"insert into t1 values (?)\", value)\n\n\ndef test_row_equal(cursor: pyodbc.Cursor):\n cursor.execute(\"create table t1(n int, s varchar(20))\")\n cursor.execute(\"insert into t1 values (1, 'test')\")\n row1 = cursor.execute(\"select n, s from t1\").fetchone()\n row2 = cursor.execute(\"select n, s from t1\").fetchone()\n assert row1 == row2\n\n\ndef test_row_gtlt(cursor: pyodbc.Cursor):\n cursor.execute(\"create table t1(n int, s varchar(20))\")\n cursor.execute(\"insert into t1 values (1, 'test1')\")\n cursor.execute(\"insert into t1 values (1, 'test2')\")\n rows = cursor.execute(\"select n, s from t1 order by s\").fetchall()\n assert rows[0] < rows[1]\n assert rows[0] <= rows[1]\n assert rows[1] > rows[0]\n assert rows[1] >= rows[0]\n assert rows[0] != rows[1]\n\n rows = list(rows)\n rows.sort() # uses <\n\n\ndef test_context_manager_success():\n \"Ensure `with` commits if an exception is not raised\"\n cnxn = connect()\n cursor = cnxn.cursor()\n\n cursor.execute(\"create table t1(n int)\")\n cnxn.commit()\n\n with cnxn:\n cursor.execute(\"insert into t1 values (1)\")\n\n rows = cursor.execute(\"select n from t1\").fetchall()\n assert len(rows) == 1\n assert rows[0][0] == 1\n\n\ndef 
test_context_manager_failure(cursor: pyodbc.Cursor):\n \"Ensure `with` rolls back if an exception is raised\"\n cnxn = connect()\n cursor = cnxn.cursor()\n\n # We'll insert a row and commit it. Then we'll insert another row followed by an\n # exception.\n\n cursor.execute(\"create table t1(n int)\")\n cursor.execute(\"insert into t1 values (1)\")\n cnxn.commit()\n\n with pytest.raises(pyodbc.Error):\n with cnxn:\n cursor.execute(\"insert into t1 values (2)\")\n cursor.execute(\"delete from bogus\")\n\n cursor.execute(\"select max(n) from t1\")\n val = cursor.fetchval()\n assert val == 1\n\n\ndef test_untyped_none(cursor: pyodbc.Cursor):\n # From issue 129\n value = cursor.execute(\"select ?\", None).fetchone()[0]\n assert value is None\n\n\ndef test_large_update_nodata(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(a varbinary(max))')\n hundredkb = b'x' * 100 * 1024\n cursor.execute('update t1 set a=? where 1=0', (hundredkb,))\n\n\ndef test_func_param(cursor: pyodbc.Cursor):\n try:\n cursor.execute(\"drop function func1\")\n except:\n pass\n cursor.execute(\"\"\"\n create function func1 (@testparam varchar(4))\n returns @rettest table (param varchar(4))\n as\n begin\n insert @rettest\n select @testparam\n return\n end\n \"\"\")\n cursor.commit()\n value = cursor.execute(\"select * from func1(?)\", 'test').fetchone()[0]\n assert value == 'test'\n\n\ndef test_columns(cursor: pyodbc.Cursor):\n # When using aiohttp, `await cursor.primaryKeys('t1')` was raising the error\n #\n # Error: TypeError: argument 2 must be str, not None\n #\n # I'm not sure why, but PyArg_ParseTupleAndKeywords fails if you use \"|s\" for an\n # optional string keyword when calling indirectly.\n\n cursor.execute(\"create table t1(a int, b varchar(3), xΏz varchar(4))\")\n\n cursor.columns('t1')\n results = {row.column_name: row for row in cursor}\n row = results['a']\n assert row.type_name == 'int', row.type_name\n row = results['b']\n assert row.type_name == 'varchar'\n assert row.column_size == 3\n\n # Now do the same, but specifically pass in None to one of the keywords. Old versions\n # were parsing arguments incorrectly and would raise an error. (This crops up when\n # calling indirectly like columns(*args, **kwargs) which aiodbc does.)\n\n cursor.columns('t1', schema=None, catalog=None)\n results = {row.column_name: row for row in cursor}\n row = results['a']\n assert row.type_name == 'int', row.type_name\n row = results['b']\n assert row.type_name == 'varchar'\n assert row.column_size == 3\n row = results['xΏz']\n assert row.type_name == 'varchar'\n assert row.column_size == 4, row.column_size\n\n for i in range(8, 16):\n table_name = 'pyodbc_89abcdef'[:i]\n\n cursor.execute(f\"\"\"\n IF OBJECT_ID (N'{table_name}', N'U') IS NOT NULL DROP TABLE {table_name};\n CREATE TABLE {table_name} (id INT PRIMARY KEY);\n \"\"\")\n\n col_count = len([col.column_name for col in cursor.columns(table_name)])\n assert col_count == 1\n\n cursor.execute(f\"drop table {table_name}\")\n\n\ndef test_cancel(cursor: pyodbc.Cursor):\n # I'm not sure how to reliably cause a hang to cancel, so for now we'll settle with\n # making sure SQLCancel is called correctly.\n cursor.execute(\"select 1\")\n cursor.cancel()\n\n\ndef test_emoticons_as_parameter(cursor: pyodbc.Cursor):\n # https://github.com/mkleehammer/pyodbc/issues/423\n #\n # When sending a varchar parameter, pyodbc is supposed to set ColumnSize to the number\n # of characters. 
Ensure it works even with 4-byte characters.\n #\n # http://www.fileformat.info/info/unicode/char/1f31c/index.htm\n\n v = \"x \\U0001F31C z\"\n\n cursor.execute(\"create table t1(s nvarchar(100))\")\n cursor.execute(\"insert into t1 values (?)\", v)\n\n result = cursor.execute(\"select s from t1\").fetchone()[0]\n\n assert result == v\n\n\ndef test_emoticons_as_literal(cursor: pyodbc.Cursor):\n # similar to `test_emoticons_as_parameter`, above, except for Unicode literal\n #\n # http://www.fileformat.info/info/unicode/char/1f31c/index.htm\n\n # FreeTDS ODBC issue fixed in version 1.1.23\n # https://github.com/FreeTDS/freetds/issues/317\n\n v = \"x \\U0001F31C z\"\n\n cursor.execute(\"create table t1(s nvarchar(100))\")\n cursor.execute(f\"insert into t1 values (N'{v}')\")\n\n result = cursor.execute(\"select s from t1\").fetchone()[0]\n\n assert result == v\n\n\ndef _test_tvp(cursor: pyodbc.Cursor, diff_schema):\n # Test table value parameters (TVP). I like the explanation here:\n #\n # https://www.mssqltips.com/sqlservertip/1483/using-table-valued-parameters-tvp-in-sql-server/\n #\n # \"At a high level the TVP allows you to populate a table declared as a T-SQL variable,\n # then pass that table as a parameter to a stored procedure or function.\"\n #\n # \"The TVP must be declared READONLY. You cannot perform any DML (i.e. INSERT, UPDATE,\n # DELETE) against the TVP; you can only reference it in a SELECT statement.\"\n #\n # In this test we'll create a table, pass it to a stored procedure, and have the stored\n # procedure simply return the rows from the TVP.\n #\n # Apparently the way pyodbc knows something is a TVP is because it is in a sequence. I'm\n # not sure I like that as it is very generic and specific to SQL Server. It would be wiser\n # to define a wrapper pyodbc.TVP or pyodbc.Table object, similar to the DB APIs `Binary`\n # object.\n\n pyodbc.native_uuid = True\n # This is the default, but we'll reset it in case a previous test fails to.\n\n procname = 'SelectTVP'\n typename = 'TestTVP'\n\n if diff_schema:\n schemaname = 'myschema'\n procname = schemaname + '.' + procname\n typenameonly = typename\n typename = schemaname + '.' 
+ typename\n\n # (Don't use \"if exists\" since older SQL Servers don't support it.)\n try:\n cursor.execute(\"drop procedure \" + procname)\n except:\n pass\n try:\n cursor.execute(\"drop type \" + typename)\n except:\n pass\n if diff_schema:\n try:\n cursor.execute(\"drop schema \" + schemaname)\n except:\n pass\n cursor.commit()\n\n if diff_schema:\n cursor.execute(\"CREATE SCHEMA myschema\")\n cursor.commit()\n\n cursor.execute(\n f\"\"\"\n CREATE TYPE {typename} AS TABLE(\n c01 VARCHAR(255),\n c02 VARCHAR(MAX),\n c03 VARBINARY(255),\n c04 VARBINARY(MAX),\n c05 BIT,\n c06 DATE,\n c07 TIME,\n c08 DATETIME2(5),\n c09 BIGINT,\n c10 FLOAT,\n c11 NUMERIC(38, 24),\n c12 UNIQUEIDENTIFIER)\n \"\"\")\n cursor.commit()\n cursor.execute(\n f\"\"\"\n CREATE PROCEDURE {procname} @TVP {typename} READONLY\n AS SELECT * FROM @TVP;\n \"\"\")\n cursor.commit()\n\n # The values aren't exactly VERY_LONG_LEN but close enough and *significantly* faster than\n # the loop we had before.\n VERY_LONG_LEN = 2000000\n long_string = ''.join(chr(i) for i in range(32, 127)) # printable characters\n long_bytearray = bytes(list(range(255)))\n very_long_string = long_string * (VERY_LONG_LEN // len(long_string))\n very_long_bytearray = long_bytearray * (VERY_LONG_LEN // len(long_bytearray))\n\n params = [\n # Three rows with all of the types in the table defined above.\n (\n 'abc', 'abc',\n bytes([0xD1, 0xCE, 0xFA, 0xCE]),\n bytes([0x0F, 0xF1, 0xCE, 0xCA, 0xFE]), True,\n date(1997, 8, 29), time(9, 13, 39),\n datetime(2018, 11, 13, 13, 33, 26, 298420),\n 1234567, 3.14, Decimal('31234567890123.141243449787580175325274'),\n uuid.UUID('4fe34a93-e574-04cc-200a-353f0d1770b1'),\n ),\n (\n '', '',\n bytes([0x00, 0x01, 0x02, 0x03, 0x04]),\n bytes([0x00, 0x01, 0x02, 0x03, 0x04, 0x05]), False,\n date(1, 1, 1), time(0, 0, 0),\n datetime(1, 1, 1, 0, 0, 0, 0),\n -9223372036854775808, -1.79E+308, Decimal('0.000000000000000000000001'),\n uuid.UUID('33f7504c-2bac-1b83-01d1-7434a7ba6a17'),\n ),\n (\n long_string, very_long_string,\n bytes(long_bytearray), bytes(very_long_bytearray), True,\n date(9999, 12, 31), time(23, 59, 59),\n datetime(9999, 12, 31, 23, 59, 59, 999990),\n 9223372036854775807, 1.79E+308, Decimal('99999999999999.999999999999999999999999'),\n uuid.UUID('ffffffff-ffff-ffff-ffff-ffffffffffff'),\n )\n ]\n\n if diff_schema:\n p1 = [[typenameonly, schemaname] + params]\n else:\n p1 = [params]\n result_array = [tuple(row) for row in cursor.execute(f\"exec {procname} ?\", p1).fetchall()]\n\n # The values make it very difficult to troubleshoot if something is wrong, so instead of\n # asserting they are the same, we'll walk them if there is a problem to identify which is\n # wrong.\n for row, param in zip(result_array, params):\n if row != param:\n for r, p in zip(row, param):\n assert r == p\n\n # Now test with zero rows.\n\n params = []\n p1 = [params]\n if diff_schema:\n p1 = [[typenameonly, schemaname] + params]\n else:\n p1 = [params]\n result_array = cursor.execute(f\"exec {procname} ?\", p1).fetchall()\n assert result_array == params\n\n\[email protected](IS_FREEDTS, reason='FreeTDS does not support TVP')\ndef test_tvp(cursor: pyodbc.Cursor):\n _test_tvp(cursor, False)\n\n\[email protected](IS_FREEDTS, reason='FreeTDS does not support TVP')\ndef test_tvp_diffschema(cursor: pyodbc.Cursor):\n _test_tvp(cursor, True)\n\n\ndef get_sqlserver_version(cursor: pyodbc.Cursor):\n\n \"\"\"\n Returns the major version: 8-->2000, 9-->2005, 10-->2008\n \"\"\"\n cursor.execute(\"exec master..xp_msver 'ProductVersion'\")\n row = 
cursor.fetchone()\n return int(row.Character_Value.split('.', 1)[0])\n\n\n@lru_cache()\ndef _generate_str(length, encoding=None):\n \"\"\"\n Returns either a string or bytes, depending on whether encoding is provided,\n that is `length` elements long.\n\n If length is None, None is returned. This simplifies the tests by letting us put None into\n an array of other lengths and pass them here, moving the special case check into one place.\n \"\"\"\n if length is None:\n return None\n\n # Put non-ASCII characters at the front so we don't end up chopping one in half in a\n # multi-byte encoding like UTF-8.\n\n v = 'á'\n\n remaining = max(0, length - len(v))\n if remaining:\n seed = '0123456789-abcdefghijklmnopqrstuvwxyz-'\n\n if remaining <= len(seed):\n v += seed\n else:\n c = (remaining + len(seed) - 1 // len(seed))\n v += seed * c\n\n if encoding:\n v = v.encode(encoding)\n\n # We chop *after* encoding because if we are encoding then we want bytes.\n v = v[:length]\n\n return v\n",
"step-ids": [
47,
56,
70,
97,
108
]
}
|
[
47,
56,
70,
97,
108
] |
class Solution:
def merge(self, nums1, m, nums2, n):
"""
Do not return anything, modify nums1 in-place instead.
"""
        if n == 0:
            # nothing to merge
            return
if nums1[m-1] <= nums2[0]:
for i in range(n):
nums1[m+i] = nums2[i]
        elif nums1[0] >= nums2[-1]:
            # every element of nums2 comes first: shift the m real values to the
            # back of nums1, then copy nums2 into the front
            for i in range(m - 1, -1, -1):
                nums1[n + i] = nums1[i]
            for i in range(n):
                nums1[i] = nums2[i]
else:
ans = [None]*len(nums1)
i = 0
j = 0
k = 0
            while i < m and j < n:
                if nums1[i] <= nums2[j]:
                    print("take 1: ", nums1[i])
                    ans[k] = nums1[i]
                    i += 1
                else:
                    print("take 2: ", nums2[j])
                    ans[k] = nums2[j]
                    j += 1
                k += 1
            # copy whatever is left of either list
            while i < m:
                ans[k] = nums1[i]
                i += 1
                k += 1
            while j < n:
                ans[k] = nums2[j]
                j += 1
                k += 1
            # assign back into the caller's list so the merge is truly in-place
            nums1[:] = ans
if __name__ == "__main__":
solve = Solution()
nums1 = [1,2,3,0,0,0]
m = 3
nums2 = [2,5,6]
n = 3
solve.merge(nums1, m, nums2, n)
print(nums1)
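

# For comparison, a common constant-extra-space variant merges from the back of
# nums1 instead of building a temporary list. This is only an illustrative
# sketch, not part of the original submission above.
def merge_from_back(nums1, m, nums2, n):
    # walk both arrays from their ends, writing the larger value into the free
    # tail of nums1; whatever remains of nums2 is copied in at the end
    i, j, k = m - 1, n - 1, m + n - 1
    while i >= 0 and j >= 0:
        if nums1[i] > nums2[j]:
            nums1[k] = nums1[i]
            i -= 1
        else:
            nums1[k] = nums2[j]
            j -= 1
        k -= 1
    nums1[:j + 1] = nums2[:j + 1]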
|
flexible
|
{
"blob_id": "4f13e2858d9cf469f14026808142886e5c3fcc85",
"index": 28,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n\n\n<mask token>\n",
"step-3": "class Solution:\n\n def merge(self, nums1, m, nums2, n):\n \"\"\"\n Do not return anything, modify nums1 in-place instead.\n \"\"\"\n if n == 0:\n nums1 = nums1\n if nums1[m - 1] <= nums2[0]:\n for i in range(n):\n nums1[m + i] = nums2[i]\n elif nums1[0] >= nums2[-1]:\n for i in range(m):\n nums1[i] = nums1[n + i]\n else:\n ans = [None] * len(nums1)\n i = 0\n j = 0\n k = 0\n while i < m and j < n:\n if nums1[i] <= nums2[j]:\n print('take 1: ', nums1[i])\n ans[k] = nums1[i]\n i += 1\n else:\n print('take 2: ', nums2[j])\n ans[k] = nums2[j]\n j += 1\n k += 1\n nums1 = ans\n\n\n<mask token>\n",
"step-4": "class Solution:\n\n def merge(self, nums1, m, nums2, n):\n \"\"\"\n Do not return anything, modify nums1 in-place instead.\n \"\"\"\n if n == 0:\n nums1 = nums1\n if nums1[m - 1] <= nums2[0]:\n for i in range(n):\n nums1[m + i] = nums2[i]\n elif nums1[0] >= nums2[-1]:\n for i in range(m):\n nums1[i] = nums1[n + i]\n else:\n ans = [None] * len(nums1)\n i = 0\n j = 0\n k = 0\n while i < m and j < n:\n if nums1[i] <= nums2[j]:\n print('take 1: ', nums1[i])\n ans[k] = nums1[i]\n i += 1\n else:\n print('take 2: ', nums2[j])\n ans[k] = nums2[j]\n j += 1\n k += 1\n nums1 = ans\n\n\nif __name__ == '__main__':\n solve = Solution()\n nums1 = [1, 2, 3, 0, 0, 0]\n m = 3\n nums2 = [2, 5, 6]\n n = 3\n solve.merge(nums1, m, nums2, n)\n print(nums1)\n",
"step-5": "class Solution:\n def merge(self, nums1, m, nums2, n):\n \"\"\"\n Do not return anything, modify nums1 in-place instead.\n \"\"\"\n \n if n == 0:\n nums1 = nums1\n if nums1[m-1] <= nums2[0]:\n \n for i in range(n):\n nums1[m+i] = nums2[i]\n \n elif nums1[0] >= nums2[-1]:\n \n for i in range(m):\n nums1[i] = nums1[n+i]\n else:\n ans = [None]*len(nums1)\n i = 0\n j = 0\n k = 0\n \n while i < m and j < n:\n if nums1[i] <= nums2[j]:\n print(\"take 1: \", nums1[i])\n ans[k] = nums1[i]\n i += 1\n else:\n print(\"take 2: \", nums2[j])\n ans[k] = nums2[j]\n j += 1\n k += 1\n\n nums1 = ans\n\nif __name__ == \"__main__\":\n solve = Solution()\n nums1 = [1,2,3,0,0,0]\n m = 3\n nums2 = [2,5,6]\n n = 3\n solve.merge(nums1, m, nums2, n)\n print(nums1)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import numpy as np
import faiss
from util import vecs_io, vecs_util
from time import time
import os
'''
Extract vecs and save them as a numpy file
'''
def vecs2numpy(fname, new_file_name, file_type, file_len=None):
    if file_type == 'bvecs':
        vectors, dim = vecs_io.bvecs_read_mmap(fname)
    elif file_type == 'ivecs':
        vectors, dim = vecs_io.ivecs_read_mmap(fname)
    elif file_type == 'fvecs':
        vectors, dim = vecs_io.fvecs_read_mmap(fname)
    else:
        raise ValueError("unsupported file_type: %s" % file_type)
if file_len is not None:
vectors = vectors[:file_len]
vectors = vectors.astype(np.float32)
np.save(new_file_name, vectors)
return vectors
'''
Create the output folder and extract base, query, gnd
'''
def get_base_query_gnd(config):
os.system("mkdir %s" % (config['project_data_dir']))
print("创建文件夹")
base_dir = '%s/%s' % (config['source_data_dir'], config['source_data_fname']['base'])
base_npy_dir = '%s/%s' % (config['project_data_dir'], 'dataset.npy')
base = vecs2numpy(base_dir, base_npy_dir, config['dataset_type'])
print("提取base")
query_dir = '%s/%s' % (config['source_data_dir'], config['source_data_fname']['query'])
query_npy_dir = '%s/%s' % (config['project_data_dir'], 'queries.npy')
query = vecs2numpy(query_dir, query_npy_dir, config['dataset_type'])
print("提取query")
gnd_npy_dir = '%s/%s' % (config['project_data_dir'], 'answers.npy')
# print(base_npy_dir)
# print(query_npy_dir)
# print(gnd_npy_dir)
gnd = vecs_util.get_gnd_numpy(base, query, config['k_gnd'], gnd_npy_dir)
print("提取gnd")
return base, query, gnd
if __name__ == '__main__':
fname = '/home/bz/learn-to-hash/data/sift/sift_dataset_unnorm.npy'
new_fname = '/home/bz/learn-to-hash/data/sift/sift_graph_10/test_graph.txt'
    # get_NN_graph is assumed to be defined elsewhere in the project; it is not part of this file
    get_NN_graph(fname, new_fname, 10)
a = '/home/bz/KaHIP/deploy/graphchecker'
b = '/home/bz/learn-to-hash/data/sift/sift_graph_10/test_graph.txt'
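

# Usage sketch (not from the original project): get_base_query_gnd only reads
# the keys shown below from its config argument. The paths, file names and the
# k_gnd value here are illustrative assumptions.
def _example_usage():
    example_config = {
        'source_data_dir': '/home/bz/data/sift',
        'source_data_fname': {'base': 'sift_base.fvecs', 'query': 'sift_query.fvecs'},
        'project_data_dir': '/home/bz/data/sift/project',
        'dataset_type': 'fvecs',  # one of 'bvecs', 'ivecs', 'fvecs'
        'k_gnd': 10,              # number of ground-truth neighbours per query
    }
    return get_base_query_gnd(example_config)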
|
flexible
|
{
"blob_id": "5f84c8654c976bca2fa33e8f9ba5e28e3249253d",
"index": 7312,
"step-1": "<mask token>\n\n\ndef vecs2numpy(fname, new_file_name, file_type, file_len=None):\n if file_type == 'bvecs':\n vectors, dim = vecs_io.bvecs_read_mmap(fname)\n elif file_type == 'ivecs':\n vectors, dim = vecs_io.ivecs_read_mmap(fname)\n elif file_type == 'fvecs':\n vectors, dim = vecs_io.fvecs_read_mmap(fname)\n if file_len is not None:\n vectors = vectors[:file_len]\n vectors = vectors.astype(np.float32)\n np.save(new_file_name, vectors)\n return vectors\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef vecs2numpy(fname, new_file_name, file_type, file_len=None):\n if file_type == 'bvecs':\n vectors, dim = vecs_io.bvecs_read_mmap(fname)\n elif file_type == 'ivecs':\n vectors, dim = vecs_io.ivecs_read_mmap(fname)\n elif file_type == 'fvecs':\n vectors, dim = vecs_io.fvecs_read_mmap(fname)\n if file_len is not None:\n vectors = vectors[:file_len]\n vectors = vectors.astype(np.float32)\n np.save(new_file_name, vectors)\n return vectors\n\n\n<mask token>\n\n\ndef get_base_query_gnd(config):\n os.system('mkdir %s' % config['project_data_dir'])\n print('创建文件夹')\n base_dir = '%s/%s' % (config['source_data_dir'], config[\n 'source_data_fname']['base'])\n base_npy_dir = '%s/%s' % (config['project_data_dir'], 'dataset.npy')\n base = vecs2numpy(base_dir, base_npy_dir, config['dataset_type'])\n print('提取base')\n query_dir = '%s/%s' % (config['source_data_dir'], config[\n 'source_data_fname']['query'])\n query_npy_dir = '%s/%s' % (config['project_data_dir'], 'queries.npy')\n query = vecs2numpy(query_dir, query_npy_dir, config['dataset_type'])\n print('提取query')\n gnd_npy_dir = '%s/%s' % (config['project_data_dir'], 'answers.npy')\n gnd = vecs_util.get_gnd_numpy(base, query, config['k_gnd'], gnd_npy_dir)\n print('提取gnd')\n return base, query, gnd\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef vecs2numpy(fname, new_file_name, file_type, file_len=None):\n if file_type == 'bvecs':\n vectors, dim = vecs_io.bvecs_read_mmap(fname)\n elif file_type == 'ivecs':\n vectors, dim = vecs_io.ivecs_read_mmap(fname)\n elif file_type == 'fvecs':\n vectors, dim = vecs_io.fvecs_read_mmap(fname)\n if file_len is not None:\n vectors = vectors[:file_len]\n vectors = vectors.astype(np.float32)\n np.save(new_file_name, vectors)\n return vectors\n\n\n<mask token>\n\n\ndef get_base_query_gnd(config):\n os.system('mkdir %s' % config['project_data_dir'])\n print('创建文件夹')\n base_dir = '%s/%s' % (config['source_data_dir'], config[\n 'source_data_fname']['base'])\n base_npy_dir = '%s/%s' % (config['project_data_dir'], 'dataset.npy')\n base = vecs2numpy(base_dir, base_npy_dir, config['dataset_type'])\n print('提取base')\n query_dir = '%s/%s' % (config['source_data_dir'], config[\n 'source_data_fname']['query'])\n query_npy_dir = '%s/%s' % (config['project_data_dir'], 'queries.npy')\n query = vecs2numpy(query_dir, query_npy_dir, config['dataset_type'])\n print('提取query')\n gnd_npy_dir = '%s/%s' % (config['project_data_dir'], 'answers.npy')\n gnd = vecs_util.get_gnd_numpy(base, query, config['k_gnd'], gnd_npy_dir)\n print('提取gnd')\n return base, query, gnd\n\n\nif __name__ == '__main__':\n fname = '/home/bz/learn-to-hash/data/sift/sift_dataset_unnorm.npy'\n new_fname = '/home/bz/learn-to-hash/data/sift/sift_graph_10/test_graph.txt'\n get_NN_graph(fname, new_fname, 10)\n a = '/home/bz/KaHIP/deploy/graphchecker'\n b = '/home/bz/learn-to-hash/data/sift/sift_graph_10/test_graph.txt'\n",
"step-4": "import numpy as np\nimport faiss\nfrom util import vecs_io, vecs_util\nfrom time import time\nimport os\n<mask token>\n\n\ndef vecs2numpy(fname, new_file_name, file_type, file_len=None):\n if file_type == 'bvecs':\n vectors, dim = vecs_io.bvecs_read_mmap(fname)\n elif file_type == 'ivecs':\n vectors, dim = vecs_io.ivecs_read_mmap(fname)\n elif file_type == 'fvecs':\n vectors, dim = vecs_io.fvecs_read_mmap(fname)\n if file_len is not None:\n vectors = vectors[:file_len]\n vectors = vectors.astype(np.float32)\n np.save(new_file_name, vectors)\n return vectors\n\n\n<mask token>\n\n\ndef get_base_query_gnd(config):\n os.system('mkdir %s' % config['project_data_dir'])\n print('创建文件夹')\n base_dir = '%s/%s' % (config['source_data_dir'], config[\n 'source_data_fname']['base'])\n base_npy_dir = '%s/%s' % (config['project_data_dir'], 'dataset.npy')\n base = vecs2numpy(base_dir, base_npy_dir, config['dataset_type'])\n print('提取base')\n query_dir = '%s/%s' % (config['source_data_dir'], config[\n 'source_data_fname']['query'])\n query_npy_dir = '%s/%s' % (config['project_data_dir'], 'queries.npy')\n query = vecs2numpy(query_dir, query_npy_dir, config['dataset_type'])\n print('提取query')\n gnd_npy_dir = '%s/%s' % (config['project_data_dir'], 'answers.npy')\n gnd = vecs_util.get_gnd_numpy(base, query, config['k_gnd'], gnd_npy_dir)\n print('提取gnd')\n return base, query, gnd\n\n\nif __name__ == '__main__':\n fname = '/home/bz/learn-to-hash/data/sift/sift_dataset_unnorm.npy'\n new_fname = '/home/bz/learn-to-hash/data/sift/sift_graph_10/test_graph.txt'\n get_NN_graph(fname, new_fname, 10)\n a = '/home/bz/KaHIP/deploy/graphchecker'\n b = '/home/bz/learn-to-hash/data/sift/sift_graph_10/test_graph.txt'\n",
"step-5": "import numpy as np\r\nimport faiss\r\nfrom util import vecs_io, vecs_util\r\nfrom time import time\r\nimport os\r\n\r\n'''\r\n提取vecs, 输出numpy文件\r\n'''\r\n\r\n\r\ndef vecs2numpy(fname, new_file_name, file_type, file_len=None):\r\n if file_type == 'bvecs':\r\n vectors, dim = vecs_io.bvecs_read_mmap(fname)\r\n elif file_type == 'ivecs':\r\n vectors, dim = vecs_io.ivecs_read_mmap(fname)\r\n elif file_type == 'fvecs':\r\n vectors, dim = vecs_io.fvecs_read_mmap(fname)\r\n if file_len is not None:\r\n vectors = vectors[:file_len]\r\n vectors = vectors.astype(np.float32)\r\n np.save(new_file_name, vectors)\r\n return vectors\r\n\r\n\r\n'''\r\n创建文件夹, 提取base, query, gnd\r\n'''\r\n\r\n\r\ndef get_base_query_gnd(config):\r\n os.system(\"mkdir %s\" % (config['project_data_dir']))\r\n print(\"创建文件夹\")\r\n\r\n base_dir = '%s/%s' % (config['source_data_dir'], config['source_data_fname']['base'])\r\n base_npy_dir = '%s/%s' % (config['project_data_dir'], 'dataset.npy')\r\n base = vecs2numpy(base_dir, base_npy_dir, config['dataset_type'])\r\n print(\"提取base\")\r\n\r\n query_dir = '%s/%s' % (config['source_data_dir'], config['source_data_fname']['query'])\r\n query_npy_dir = '%s/%s' % (config['project_data_dir'], 'queries.npy')\r\n query = vecs2numpy(query_dir, query_npy_dir, config['dataset_type'])\r\n print(\"提取query\")\r\n\r\n gnd_npy_dir = '%s/%s' % (config['project_data_dir'], 'answers.npy')\r\n # print(base_npy_dir)\r\n # print(query_npy_dir)\r\n # print(gnd_npy_dir)\r\n gnd = vecs_util.get_gnd_numpy(base, query, config['k_gnd'], gnd_npy_dir)\r\n print(\"提取gnd\")\r\n return base, query, gnd\r\n\r\n\r\nif __name__ == '__main__':\r\n fname = '/home/bz/learn-to-hash/data/sift/sift_dataset_unnorm.npy'\r\n new_fname = '/home/bz/learn-to-hash/data/sift/sift_graph_10/test_graph.txt'\r\n get_NN_graph(fname, new_fname, 10)\r\n a = '/home/bz/KaHIP/deploy/graphchecker'\r\n b = '/home/bz/learn-to-hash/data/sift/sift_graph_10/test_graph.txt'\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 25 19:21:32 2019
@author: Nikos
"""
import torch
import torch.optim as optim
from utilities import *
from model import *
from torch.autograd import Variable
import numpy as np
import random
class A2C_agent(object):
def __init__(self, env, actor_hidden_size, actor_lr, actor_batch_size,
critic_gamma, mem_size, critic_hidden_size, critic_lr, critic_batch_size):
self.env = env
self.actor_hidden_size = actor_hidden_size
self.actor_lr = actor_lr
self.actor_batch_size = actor_batch_size
self.critic_hidden_size = critic_hidden_size
self.critic_lr = critic_lr
self.critic_batch_size = critic_batch_size
self.critic_gamma = critic_gamma
self.mem_size = mem_size
self.num_of_states = env.observation_space.shape[0]
self.num_of_actions = env.action_space.n
self.experience_replay_buffer = ReplayBuffer(self.mem_size)
# initialize the Actor network (policy)
self.actor_network = ActorNet(self.num_of_states, self.actor_hidden_size, self.num_of_actions)
self.actor_optimizer = optim.Adam(self.actor_network.parameters(), lr = self.actor_lr)
# initialize the Critic network (v-learning)
# The difference between the critic in A2C (here) and the
	# critic in the "vanilla" Actor-Critic version is that the
# critic in A2C models the value function, hence it needs
# to only output the value of each state and not the Q-value
# for each (state, action) pair. Therefore, the output size
# here needs to be a scalar.
self.critic_network = CriticNet(self.num_of_states, self.critic_hidden_size, 1)
self.critic_optimizer = optim.Adam(self.critic_network.parameters(), lr = self.critic_lr)
def act(self, state):
# compute the action distribution based on the current state via the policy net
action_distribution = self.actor_network.forward(state)
# pick an action based on that distribution
action = np.random.choice(self.num_of_actions, p = action_distribution.detach().numpy())
return action
def memorize(self, state, action, new_state, reward, done):
# this function takes a transition (state, action, new_state, reward, done)
# and stores it into the experience memory buffer
self.experience_replay_buffer.push(state, action, new_state, reward, done)
def learn(self, rewards_batch, states_batch, actions_batch, new_states_batch, new_actions_batch):
#states_batch = torch.tensor(states_batch, dtype=torch.float)
states_batch = np.asarray(states_batch)
actions_batch = torch.tensor(actions_batch, dtype=torch.long)
rewards_batch = torch.tensor(rewards_batch, dtype=torch.float)
		new_states_batch = np.asarray(new_states_batch)
		new_actions_batch = torch.tensor(new_actions_batch, dtype=torch.long)
V_batch = []
V_prime_batch = []
for state, new_state, new_action in zip(states_batch,\
new_states_batch, new_actions_batch):
state = torch.Tensor(state)
v_value = self.critic_network.forward(state)
# get q-value for specific action
#Q = q_values.gather(-1, action)
V_batch.append(v_value)
new_state = torch.Tensor(new_state)
v_prime_value = self.critic_network.forward(new_state)
#V_prime = q_prime_values.gather(-1, new_action)
V_prime_batch.append(v_prime_value)
# compute the log of the probabilities that the policy outputs for each state
log_probs = torch.log(self.actor_network(states_batch))
# pick those log probabilities that correspond to the actions that were selected
selected_log_probs = rewards_batch * log_probs[np.arange(len(actions_batch)), actions_batch]
# compute the monte-carlo estimate by averaging the losses and then form the optimization
# criterion, which will be the negative log probs.
actor_loss = -selected_log_probs.mean()
self.actor_optimizer.zero_grad()
actor_loss.backward()
# if we need smooth updates we clip the grads between -1 and 1
#for param in self.online_dqn_network.parameters():
# param.grad.data.clamp_(-1,1)
self.actor_optimizer.step()
# Compute TD error for V network
V_prime_batch = torch.stack(V_prime_batch)
V_batch = torch.stack(V_batch)
		# A(s, a) = r_prime + gamma * V_prime - V (computed for reference; note that
		# the actor update above weights the log-probabilities by the raw rewards)
		advantage = rewards_batch + self.critic_gamma * V_prime_batch - V_batch
		# regress V towards the bootstrapped TD target; the target is detached so
		# gradients only flow through V_batch
		critic_loss = (V_batch - (rewards_batch + self.critic_gamma * V_prime_batch.detach())).pow(2).mean()
#print(critic_loss)
self.critic_optimizer.zero_grad()
critic_loss.backward()
self.critic_optimizer.step()
#return loss
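

# Rough usage sketch (assumptions, not code from the original project): the gym
# environment name, the hyper-parameter values and the way the per-episode batch
# is assembled below are all illustrative. The classic gym step()/reset() API is
# assumed here.
def _example_training_loop():
    import gym
    env = gym.make('CartPole-v1')
    agent = A2C_agent(env, actor_hidden_size=64, actor_lr=1e-3, actor_batch_size=32,
                      critic_gamma=0.99, mem_size=10000,
                      critic_hidden_size=64, critic_lr=1e-3, critic_batch_size=32)
    for episode in range(500):
        state = env.reset()
        states, actions, rewards, new_states = [], [], [], []
        done = False
        while not done:
            action = agent.act(torch.Tensor(state))
            new_state, reward, done, _ = env.step(action)
            agent.memorize(state, action, new_state, reward, done)
            states.append(state)
            actions.append(action)
            rewards.append(reward)
            new_states.append(new_state)
            state = new_state
        # the "next action" for each transition is simply the action taken one
        # step later; how the original project builds this batch is not shown,
        # so this pairing is a guess
        new_actions = actions[1:] + [actions[-1]]
        agent.learn(rewards, states, actions, new_states, new_actions)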
|
flexible
|
{
"blob_id": "72b086e833ab3ee4ec3102869d74513ef3657675",
"index": 1926,
"step-1": "<mask token>\n\n\nclass A2C_agent(object):\n <mask token>\n\n def act(self, state):\n action_distribution = self.actor_network.forward(state)\n action = np.random.choice(self.num_of_actions, p=\n action_distribution.detach().numpy())\n return action\n\n def memorize(self, state, action, new_state, reward, done):\n self.experience_replay_buffer.push(state, action, new_state, reward,\n done)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass A2C_agent(object):\n <mask token>\n\n def act(self, state):\n action_distribution = self.actor_network.forward(state)\n action = np.random.choice(self.num_of_actions, p=\n action_distribution.detach().numpy())\n return action\n\n def memorize(self, state, action, new_state, reward, done):\n self.experience_replay_buffer.push(state, action, new_state, reward,\n done)\n\n def learn(self, rewards_batch, states_batch, actions_batch,\n new_states_batch, new_actions_batch):\n states_batch = np.asarray(states_batch)\n actions_batch = torch.tensor(actions_batch, dtype=torch.long)\n rewards_batch = torch.tensor(rewards_batch, dtype=torch.float)\n new_states_batch = np.asarray(states_batch)\n new_actions_batch = torch.tensor(actions_batch, dtype=torch.long)\n V_batch = []\n V_prime_batch = []\n for state, new_state, new_action in zip(states_batch,\n new_states_batch, new_actions_batch):\n state = torch.Tensor(state)\n v_value = self.critic_network.forward(state)\n V_batch.append(v_value)\n new_state = torch.Tensor(new_state)\n v_prime_value = self.critic_network.forward(new_state)\n V_prime_batch.append(v_prime_value)\n log_probs = torch.log(self.actor_network(states_batch))\n selected_log_probs = rewards_batch * log_probs[np.arange(len(\n actions_batch)), actions_batch]\n actor_loss = -selected_log_probs.mean()\n self.actor_optimizer.zero_grad()\n actor_loss.backward()\n self.actor_optimizer.step()\n V_prime_batch = torch.stack(V_prime_batch)\n V_batch = torch.stack(V_batch)\n advantage = rewards_batch + self.critic_gamma * V_prime_batch - V_batch\n critic_loss = (V_batch - (rewards_batch + self.critic_gamma *\n V_prime_batch)).pow(2).mean()\n self.critic_optimizer.zero_grad()\n critic_loss.backward()\n self.critic_optimizer.step()\n",
"step-3": "<mask token>\n\n\nclass A2C_agent(object):\n\n def __init__(self, env, actor_hidden_size, actor_lr, actor_batch_size,\n critic_gamma, mem_size, critic_hidden_size, critic_lr,\n critic_batch_size):\n self.env = env\n self.actor_hidden_size = actor_hidden_size\n self.actor_lr = actor_lr\n self.actor_batch_size = actor_batch_size\n self.critic_hidden_size = critic_hidden_size\n self.critic_lr = critic_lr\n self.critic_batch_size = critic_batch_size\n self.critic_gamma = critic_gamma\n self.mem_size = mem_size\n self.num_of_states = env.observation_space.shape[0]\n self.num_of_actions = env.action_space.n\n self.experience_replay_buffer = ReplayBuffer(self.mem_size)\n self.actor_network = ActorNet(self.num_of_states, self.\n actor_hidden_size, self.num_of_actions)\n self.actor_optimizer = optim.Adam(self.actor_network.parameters(),\n lr=self.actor_lr)\n self.critic_network = CriticNet(self.num_of_states, self.\n critic_hidden_size, 1)\n self.critic_optimizer = optim.Adam(self.critic_network.parameters(),\n lr=self.critic_lr)\n\n def act(self, state):\n action_distribution = self.actor_network.forward(state)\n action = np.random.choice(self.num_of_actions, p=\n action_distribution.detach().numpy())\n return action\n\n def memorize(self, state, action, new_state, reward, done):\n self.experience_replay_buffer.push(state, action, new_state, reward,\n done)\n\n def learn(self, rewards_batch, states_batch, actions_batch,\n new_states_batch, new_actions_batch):\n states_batch = np.asarray(states_batch)\n actions_batch = torch.tensor(actions_batch, dtype=torch.long)\n rewards_batch = torch.tensor(rewards_batch, dtype=torch.float)\n new_states_batch = np.asarray(states_batch)\n new_actions_batch = torch.tensor(actions_batch, dtype=torch.long)\n V_batch = []\n V_prime_batch = []\n for state, new_state, new_action in zip(states_batch,\n new_states_batch, new_actions_batch):\n state = torch.Tensor(state)\n v_value = self.critic_network.forward(state)\n V_batch.append(v_value)\n new_state = torch.Tensor(new_state)\n v_prime_value = self.critic_network.forward(new_state)\n V_prime_batch.append(v_prime_value)\n log_probs = torch.log(self.actor_network(states_batch))\n selected_log_probs = rewards_batch * log_probs[np.arange(len(\n actions_batch)), actions_batch]\n actor_loss = -selected_log_probs.mean()\n self.actor_optimizer.zero_grad()\n actor_loss.backward()\n self.actor_optimizer.step()\n V_prime_batch = torch.stack(V_prime_batch)\n V_batch = torch.stack(V_batch)\n advantage = rewards_batch + self.critic_gamma * V_prime_batch - V_batch\n critic_loss = (V_batch - (rewards_batch + self.critic_gamma *\n V_prime_batch)).pow(2).mean()\n self.critic_optimizer.zero_grad()\n critic_loss.backward()\n self.critic_optimizer.step()\n",
"step-4": "<mask token>\nimport torch\nimport torch.optim as optim\nfrom utilities import *\nfrom model import *\nfrom torch.autograd import Variable\nimport numpy as np\nimport random\n\n\nclass A2C_agent(object):\n\n def __init__(self, env, actor_hidden_size, actor_lr, actor_batch_size,\n critic_gamma, mem_size, critic_hidden_size, critic_lr,\n critic_batch_size):\n self.env = env\n self.actor_hidden_size = actor_hidden_size\n self.actor_lr = actor_lr\n self.actor_batch_size = actor_batch_size\n self.critic_hidden_size = critic_hidden_size\n self.critic_lr = critic_lr\n self.critic_batch_size = critic_batch_size\n self.critic_gamma = critic_gamma\n self.mem_size = mem_size\n self.num_of_states = env.observation_space.shape[0]\n self.num_of_actions = env.action_space.n\n self.experience_replay_buffer = ReplayBuffer(self.mem_size)\n self.actor_network = ActorNet(self.num_of_states, self.\n actor_hidden_size, self.num_of_actions)\n self.actor_optimizer = optim.Adam(self.actor_network.parameters(),\n lr=self.actor_lr)\n self.critic_network = CriticNet(self.num_of_states, self.\n critic_hidden_size, 1)\n self.critic_optimizer = optim.Adam(self.critic_network.parameters(),\n lr=self.critic_lr)\n\n def act(self, state):\n action_distribution = self.actor_network.forward(state)\n action = np.random.choice(self.num_of_actions, p=\n action_distribution.detach().numpy())\n return action\n\n def memorize(self, state, action, new_state, reward, done):\n self.experience_replay_buffer.push(state, action, new_state, reward,\n done)\n\n def learn(self, rewards_batch, states_batch, actions_batch,\n new_states_batch, new_actions_batch):\n states_batch = np.asarray(states_batch)\n actions_batch = torch.tensor(actions_batch, dtype=torch.long)\n rewards_batch = torch.tensor(rewards_batch, dtype=torch.float)\n new_states_batch = np.asarray(states_batch)\n new_actions_batch = torch.tensor(actions_batch, dtype=torch.long)\n V_batch = []\n V_prime_batch = []\n for state, new_state, new_action in zip(states_batch,\n new_states_batch, new_actions_batch):\n state = torch.Tensor(state)\n v_value = self.critic_network.forward(state)\n V_batch.append(v_value)\n new_state = torch.Tensor(new_state)\n v_prime_value = self.critic_network.forward(new_state)\n V_prime_batch.append(v_prime_value)\n log_probs = torch.log(self.actor_network(states_batch))\n selected_log_probs = rewards_batch * log_probs[np.arange(len(\n actions_batch)), actions_batch]\n actor_loss = -selected_log_probs.mean()\n self.actor_optimizer.zero_grad()\n actor_loss.backward()\n self.actor_optimizer.step()\n V_prime_batch = torch.stack(V_prime_batch)\n V_batch = torch.stack(V_batch)\n advantage = rewards_batch + self.critic_gamma * V_prime_batch - V_batch\n critic_loss = (V_batch - (rewards_batch + self.critic_gamma *\n V_prime_batch)).pow(2).mean()\n self.critic_optimizer.zero_grad()\n critic_loss.backward()\n self.critic_optimizer.step()\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 25 19:21:32 2019\n\n@author: Nikos\n\"\"\"\n\nimport torch\nimport torch.optim as optim\nfrom utilities import *\nfrom model import *\nfrom torch.autograd import Variable\nimport numpy as np\nimport random\n\nclass A2C_agent(object):\n\tdef __init__(self, env, actor_hidden_size, actor_lr, actor_batch_size, \n\t\tcritic_gamma, mem_size, critic_hidden_size, critic_lr, critic_batch_size):\n \n\t\tself.env = env\n\t\tself.actor_hidden_size = actor_hidden_size\n\t\tself.actor_lr = actor_lr\n\t\tself.actor_batch_size = actor_batch_size\n\n\t\tself.critic_hidden_size = critic_hidden_size\n\t\tself.critic_lr = critic_lr\n\t\tself.critic_batch_size = critic_batch_size\n\t\tself.critic_gamma = critic_gamma\n\n\t\tself.mem_size = mem_size\n \n\t\tself.num_of_states = env.observation_space.shape[0]\n\t\tself.num_of_actions = env.action_space.n\n\n\t\tself.experience_replay_buffer = ReplayBuffer(self.mem_size)\n \n # initialize the Actor network (policy)\n\t\tself.actor_network = ActorNet(self.num_of_states, self.actor_hidden_size, self.num_of_actions)\n \n\t\tself.actor_optimizer = optim.Adam(self.actor_network.parameters(), lr = self.actor_lr) \n\n\t\t# initialize the Critic network (v-learning)\n\t\t# The difference between the critic in A2C (here) and the \n\t# critic int he \"vanilla\" Actor-Critic version is that the\n\t# critic in A2C models the value function, hence it needs\n\t# to only output the value of each state and not the Q-value\n\t# for each (state, action) pair. Therefore, the output size\n\t# here needs to be a scalar.\n\t\tself.critic_network = CriticNet(self.num_of_states, self.critic_hidden_size, 1)\n \n\t\tself.critic_optimizer = optim.Adam(self.critic_network.parameters(), lr = self.critic_lr) \n \n\tdef act(self, state):\n \t# compute the action distribution based on the current state via the policy net\n\t\taction_distribution = self.actor_network.forward(state)\n\n # pick an action based on that distribution\n\t\taction = np.random.choice(self.num_of_actions, p = action_distribution.detach().numpy())\n\t\treturn action\n\t\t\n\tdef memorize(self, state, action, new_state, reward, done):\n # this function takes a transition (state, action, new_state, reward, done)\n # and stores it into the experience memory buffer\n\t\tself.experience_replay_buffer.push(state, action, new_state, reward, done)\n\n\tdef learn(self, rewards_batch, states_batch, actions_batch, new_states_batch, new_actions_batch):\n\n\t\t#states_batch = torch.tensor(states_batch, dtype=torch.float)\n\t\tstates_batch = np.asarray(states_batch)\n\t\tactions_batch = torch.tensor(actions_batch, dtype=torch.long)\n\t\trewards_batch = torch.tensor(rewards_batch, dtype=torch.float)\n\t\tnew_states_batch = np.asarray(states_batch)\n\t\tnew_actions_batch = torch.tensor(actions_batch, dtype=torch.long)\n\t\tV_batch = []\n\t\tV_prime_batch = []\n\n\t\tfor state, new_state, new_action in zip(states_batch,\\\n\t\t\tnew_states_batch, new_actions_batch):\n\t\t\tstate = torch.Tensor(state)\n\n\t\t\tv_value = self.critic_network.forward(state)\n\t\t\t# get q-value for specific action\n\t\t\t#Q = q_values.gather(-1, action)\n\t\t\tV_batch.append(v_value)\n\n\t\t\tnew_state = torch.Tensor(new_state)\n\t\t\tv_prime_value = self.critic_network.forward(new_state)\n\t\t\t#V_prime = q_prime_values.gather(-1, new_action)\n\t\t\tV_prime_batch.append(v_prime_value)\n \n # compute the log of the probabilities that the policy outputs for each 
state\n\t\tlog_probs = torch.log(self.actor_network(states_batch))\n # pick those log probabilities that correspond to the actions that were selected\n\t\tselected_log_probs = rewards_batch * log_probs[np.arange(len(actions_batch)), actions_batch]\n # compute the monte-carlo estimate by averaging the losses and then form the optimization\n # criterion, which will be the negative log probs.\n\t\tactor_loss = -selected_log_probs.mean()\n\t\tself.actor_optimizer.zero_grad()\n\t\tactor_loss.backward()\n \n # if we need smooth updates we clip the grads between -1 and 1\n #for param in self.online_dqn_network.parameters():\n # param.grad.data.clamp_(-1,1)\n\t\tself.actor_optimizer.step()\n\n\t\t# Compute TD error for V network\n\t\tV_prime_batch = torch.stack(V_prime_batch)\n\t\tV_batch = torch.stack(V_batch)\n\t\t# A(s, a) = r_prime + gamma * V_prime - V\n\t\tadvantage = rewards_batch + self.critic_gamma * V_prime_batch - V_batch\n\t\t#print(deltas)\n\n\t\tcritic_loss = (V_batch - (rewards_batch + self.critic_gamma * V_prime_batch)).pow(2).mean()\n\t\t#print(critic_loss)\n\t\tself.critic_optimizer.zero_grad()\n\t\tcritic_loss.backward()\n\t\tself.critic_optimizer.step()\n\n\n\t\t#return loss",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
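The actor-critic record above imports ActorNet and CriticNet from a model module that is not included in this dump. The sketch below shows one minimal way those two networks could look, assuming plain single-hidden-layer MLPs with a softmax policy head and a scalar value head; the layer sizes, ReLU activations, and tensor conversion are assumptions, and only the class names and constructor signatures are taken from the record.

import torch
import torch.nn as nn
import torch.nn.functional as F


class ActorNet(nn.Module):
    # policy network: maps a state vector to a probability distribution over actions
    def __init__(self, state_size, hidden_size, num_actions):
        super().__init__()
        self.fc1 = nn.Linear(state_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, num_actions)

    def forward(self, state):
        x = torch.as_tensor(state, dtype=torch.float32)
        return F.softmax(self.fc2(F.relu(self.fc1(x))), dim=-1)


class CriticNet(nn.Module):
    # value network: maps a state vector to a scalar estimate of V(s)
    def __init__(self, state_size, hidden_size, output_size=1):
        super().__init__()
        self.fc1 = nn.Linear(state_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, output_size)

    def forward(self, state):
        x = torch.as_tensor(state, dtype=torch.float32)
        return self.fc2(F.relu(self.fc1(x)))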
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from nose.tools import *
from json import json
def test_json_basestring():
assert_equals(json("Hello World"), '"Hello World"')
def test_json_integer():
assert_equals(json(9), "9")
def test_json_float():
assert_equals(json(1.234), "1.234")
def test_json_array():
data = [1, 2, 3]
assert_equals(json(data), '[1,2,3]')
def test_json_array02():
data = ['bla', 1, 1.2]
assert_equals(json(data), '["bla",1,1.2]')
def test_json_dict():
data = { 'foo': 'bar' }
assert_equals(json(data), '{"foo":"bar"}')
def test_json_dict_list():
data = { 'foo': [1, 2, 3] }
assert_equals(json(data), '{"foo":[1,2,3]}')
def test_json_dict_int_key():
data = {1:[1, 2, 3] }
assert_equals(json(data), '{"1":[1,2,3]}')
def test_json_dictindict():
data = { 'foo': {'fizz' : 'buzz'} }
assert_equals(json(data), '{"foo":{"fizz":"buzz"}}')
def test_json_2_dict():
data = { 'foo': 'fizz', 'bar' : 'buzz'}
assert_equals(json(data), '{"bar":"buzz","foo":"fizz"}')
def test_json_2_dict_2():
data = { 'foo': 'fizz', 'bar' : 'buzz', 'a': [1, 2, 3]}
assert_equals(json(data), '{"a":[1,2,3],"bar":"buzz","foo":"fizz"}')
def test_empty_list():
data = []
assert_equals(json(data), "[]")
def test_empty_dict():
data = {}
assert_equals(json(data), "{}")
def test_list_with_empty_dict():
data = [{}]
assert_equals(json(data), "[{}]")
def test_rangie2():
data = {"": 0}
assert_equals(json(data), '{"":0}')
def test_none():
assert_equals(json(None), "null")
def test_object():
def closure():
json(object())
assert_raises(TypeError, closure)
def test_bool():
assert_equals(json(True), 'true')
def test_object_in_array():
def closure():
json([object()])
assert_raises(TypeError, closure)
def test_object_in_dict():
def closure():
json({'a': object()})
assert_raises(TypeError, closure)
def test_object_class():
def closure():
json(object)
assert_raises(TypeError, closure)
def test_escape():
assert_equals(json('"') , '"\\""')
|
normal
|
{
"blob_id": "09ce2aeccfd1f3f4f130fd79001db47485cc95c2",
"index": 9891,
"step-1": "<mask token>\n\n\ndef test_json_float():\n assert_equals(json(1.234), '1.234')\n\n\ndef test_json_array():\n data = [1, 2, 3]\n assert_equals(json(data), '[1,2,3]')\n\n\ndef test_json_array02():\n data = ['bla', 1, 1.2]\n assert_equals(json(data), '[\"bla\",1,1.2]')\n\n\ndef test_json_dict():\n data = {'foo': 'bar'}\n assert_equals(json(data), '{\"foo\":\"bar\"}')\n\n\n<mask token>\n\n\ndef test_json_dict_int_key():\n data = {(1): [1, 2, 3]}\n assert_equals(json(data), '{\"1\":[1,2,3]}')\n\n\ndef test_json_dictindict():\n data = {'foo': {'fizz': 'buzz'}}\n assert_equals(json(data), '{\"foo\":{\"fizz\":\"buzz\"}}')\n\n\ndef test_json_2_dict():\n data = {'foo': 'fizz', 'bar': 'buzz'}\n assert_equals(json(data), '{\"bar\":\"buzz\",\"foo\":\"fizz\"}')\n\n\n<mask token>\n\n\ndef test_list_with_empty_dict():\n data = [{}]\n assert_equals(json(data), '[{}]')\n\n\n<mask token>\n\n\ndef test_object():\n\n def closure():\n json(object())\n assert_raises(TypeError, closure)\n\n\n<mask token>\n\n\ndef test_object_in_dict():\n\n def closure():\n json({'a': object()})\n assert_raises(TypeError, closure)\n\n\ndef test_object_class():\n\n def closure():\n json(object)\n assert_raises(TypeError, closure)\n\n\ndef test_escape():\n assert_equals(json('\"'), '\"\\\\\"\"')\n",
"step-2": "<mask token>\n\n\ndef test_json_basestring():\n assert_equals(json('Hello World'), '\"Hello World\"')\n\n\n<mask token>\n\n\ndef test_json_float():\n assert_equals(json(1.234), '1.234')\n\n\ndef test_json_array():\n data = [1, 2, 3]\n assert_equals(json(data), '[1,2,3]')\n\n\ndef test_json_array02():\n data = ['bla', 1, 1.2]\n assert_equals(json(data), '[\"bla\",1,1.2]')\n\n\ndef test_json_dict():\n data = {'foo': 'bar'}\n assert_equals(json(data), '{\"foo\":\"bar\"}')\n\n\n<mask token>\n\n\ndef test_json_dict_int_key():\n data = {(1): [1, 2, 3]}\n assert_equals(json(data), '{\"1\":[1,2,3]}')\n\n\ndef test_json_dictindict():\n data = {'foo': {'fizz': 'buzz'}}\n assert_equals(json(data), '{\"foo\":{\"fizz\":\"buzz\"}}')\n\n\ndef test_json_2_dict():\n data = {'foo': 'fizz', 'bar': 'buzz'}\n assert_equals(json(data), '{\"bar\":\"buzz\",\"foo\":\"fizz\"}')\n\n\n<mask token>\n\n\ndef test_list_with_empty_dict():\n data = [{}]\n assert_equals(json(data), '[{}]')\n\n\n<mask token>\n\n\ndef test_object():\n\n def closure():\n json(object())\n assert_raises(TypeError, closure)\n\n\n<mask token>\n\n\ndef test_object_in_dict():\n\n def closure():\n json({'a': object()})\n assert_raises(TypeError, closure)\n\n\ndef test_object_class():\n\n def closure():\n json(object)\n assert_raises(TypeError, closure)\n\n\ndef test_escape():\n assert_equals(json('\"'), '\"\\\\\"\"')\n",
"step-3": "<mask token>\n\n\ndef test_json_basestring():\n assert_equals(json('Hello World'), '\"Hello World\"')\n\n\n<mask token>\n\n\ndef test_json_float():\n assert_equals(json(1.234), '1.234')\n\n\ndef test_json_array():\n data = [1, 2, 3]\n assert_equals(json(data), '[1,2,3]')\n\n\ndef test_json_array02():\n data = ['bla', 1, 1.2]\n assert_equals(json(data), '[\"bla\",1,1.2]')\n\n\ndef test_json_dict():\n data = {'foo': 'bar'}\n assert_equals(json(data), '{\"foo\":\"bar\"}')\n\n\ndef test_json_dict_list():\n data = {'foo': [1, 2, 3]}\n assert_equals(json(data), '{\"foo\":[1,2,3]}')\n\n\ndef test_json_dict_int_key():\n data = {(1): [1, 2, 3]}\n assert_equals(json(data), '{\"1\":[1,2,3]}')\n\n\ndef test_json_dictindict():\n data = {'foo': {'fizz': 'buzz'}}\n assert_equals(json(data), '{\"foo\":{\"fizz\":\"buzz\"}}')\n\n\ndef test_json_2_dict():\n data = {'foo': 'fizz', 'bar': 'buzz'}\n assert_equals(json(data), '{\"bar\":\"buzz\",\"foo\":\"fizz\"}')\n\n\ndef test_json_2_dict_2():\n data = {'foo': 'fizz', 'bar': 'buzz', 'a': [1, 2, 3]}\n assert_equals(json(data), '{\"a\":[1,2,3],\"bar\":\"buzz\",\"foo\":\"fizz\"}')\n\n\n<mask token>\n\n\ndef test_empty_dict():\n data = {}\n assert_equals(json(data), '{}')\n\n\ndef test_list_with_empty_dict():\n data = [{}]\n assert_equals(json(data), '[{}]')\n\n\ndef test_rangie2():\n data = {'': 0}\n assert_equals(json(data), '{\"\":0}')\n\n\ndef test_none():\n assert_equals(json(None), 'null')\n\n\ndef test_object():\n\n def closure():\n json(object())\n assert_raises(TypeError, closure)\n\n\ndef test_bool():\n assert_equals(json(True), 'true')\n\n\n<mask token>\n\n\ndef test_object_in_dict():\n\n def closure():\n json({'a': object()})\n assert_raises(TypeError, closure)\n\n\ndef test_object_class():\n\n def closure():\n json(object)\n assert_raises(TypeError, closure)\n\n\ndef test_escape():\n assert_equals(json('\"'), '\"\\\\\"\"')\n",
"step-4": "<mask token>\n\n\ndef test_json_basestring():\n assert_equals(json('Hello World'), '\"Hello World\"')\n\n\ndef test_json_integer():\n assert_equals(json(9), '9')\n\n\ndef test_json_float():\n assert_equals(json(1.234), '1.234')\n\n\ndef test_json_array():\n data = [1, 2, 3]\n assert_equals(json(data), '[1,2,3]')\n\n\ndef test_json_array02():\n data = ['bla', 1, 1.2]\n assert_equals(json(data), '[\"bla\",1,1.2]')\n\n\ndef test_json_dict():\n data = {'foo': 'bar'}\n assert_equals(json(data), '{\"foo\":\"bar\"}')\n\n\ndef test_json_dict_list():\n data = {'foo': [1, 2, 3]}\n assert_equals(json(data), '{\"foo\":[1,2,3]}')\n\n\ndef test_json_dict_int_key():\n data = {(1): [1, 2, 3]}\n assert_equals(json(data), '{\"1\":[1,2,3]}')\n\n\ndef test_json_dictindict():\n data = {'foo': {'fizz': 'buzz'}}\n assert_equals(json(data), '{\"foo\":{\"fizz\":\"buzz\"}}')\n\n\ndef test_json_2_dict():\n data = {'foo': 'fizz', 'bar': 'buzz'}\n assert_equals(json(data), '{\"bar\":\"buzz\",\"foo\":\"fizz\"}')\n\n\ndef test_json_2_dict_2():\n data = {'foo': 'fizz', 'bar': 'buzz', 'a': [1, 2, 3]}\n assert_equals(json(data), '{\"a\":[1,2,3],\"bar\":\"buzz\",\"foo\":\"fizz\"}')\n\n\ndef test_empty_list():\n data = []\n assert_equals(json(data), '[]')\n\n\ndef test_empty_dict():\n data = {}\n assert_equals(json(data), '{}')\n\n\ndef test_list_with_empty_dict():\n data = [{}]\n assert_equals(json(data), '[{}]')\n\n\ndef test_rangie2():\n data = {'': 0}\n assert_equals(json(data), '{\"\":0}')\n\n\ndef test_none():\n assert_equals(json(None), 'null')\n\n\ndef test_object():\n\n def closure():\n json(object())\n assert_raises(TypeError, closure)\n\n\ndef test_bool():\n assert_equals(json(True), 'true')\n\n\ndef test_object_in_array():\n\n def closure():\n json([object()])\n assert_raises(TypeError, closure)\n\n\ndef test_object_in_dict():\n\n def closure():\n json({'a': object()})\n assert_raises(TypeError, closure)\n\n\ndef test_object_class():\n\n def closure():\n json(object)\n assert_raises(TypeError, closure)\n\n\ndef test_escape():\n assert_equals(json('\"'), '\"\\\\\"\"')\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom nose.tools import *\n\nfrom json import json\n\ndef test_json_basestring():\n assert_equals(json(\"Hello World\"), '\"Hello World\"')\n\ndef test_json_integer():\n assert_equals(json(9), \"9\")\n\ndef test_json_float():\n assert_equals(json(1.234), \"1.234\")\n \ndef test_json_array():\n data = [1, 2, 3]\n assert_equals(json(data), '[1,2,3]')\n\ndef test_json_array02():\n data = ['bla', 1, 1.2]\n assert_equals(json(data), '[\"bla\",1,1.2]')\n\ndef test_json_dict():\n data = { 'foo': 'bar' }\n assert_equals(json(data), '{\"foo\":\"bar\"}')\n \ndef test_json_dict_list():\n data = { 'foo': [1, 2, 3] }\n assert_equals(json(data), '{\"foo\":[1,2,3]}')\n \ndef test_json_dict_int_key():\n data = {1:[1, 2, 3] }\n assert_equals(json(data), '{\"1\":[1,2,3]}')\n \ndef test_json_dictindict():\n data = { 'foo': {'fizz' : 'buzz'} }\n assert_equals(json(data), '{\"foo\":{\"fizz\":\"buzz\"}}')\n\ndef test_json_2_dict():\n data = { 'foo': 'fizz', 'bar' : 'buzz'}\n assert_equals(json(data), '{\"bar\":\"buzz\",\"foo\":\"fizz\"}')\n\ndef test_json_2_dict_2():\n data = { 'foo': 'fizz', 'bar' : 'buzz', 'a': [1, 2, 3]}\n assert_equals(json(data), '{\"a\":[1,2,3],\"bar\":\"buzz\",\"foo\":\"fizz\"}')\n\ndef test_empty_list():\n data = []\n assert_equals(json(data), \"[]\")\n \ndef test_empty_dict():\n data = {}\n assert_equals(json(data), \"{}\")\n \ndef test_list_with_empty_dict():\n data = [{}]\n assert_equals(json(data), \"[{}]\")\n \ndef test_rangie2():\n data = {\"\": 0}\n assert_equals(json(data), '{\"\":0}')\n \ndef test_none():\n assert_equals(json(None), \"null\")\n\ndef test_object():\n def closure():\n json(object())\n assert_raises(TypeError, closure)\n \ndef test_bool():\n assert_equals(json(True), 'true')\n \ndef test_object_in_array():\n def closure():\n json([object()])\n assert_raises(TypeError, closure)\n\ndef test_object_in_dict():\n def closure():\n json({'a': object()})\n assert_raises(TypeError, closure)\n \ndef test_object_class():\n def closure():\n json(object)\n assert_raises(TypeError, closure)\n\ndef test_escape():\n assert_equals(json('\"') , '\"\\\\\"\"')\n \n",
"step-ids": [
12,
13,
19,
22,
24
]
}
|
[
12,
13,
19,
22,
24
] |
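The tests above exercise a project-local json module that is not part of this record. A minimal serializer that would satisfy every assertion shown (quoted and escaped strings, bare numbers, compact separators, string-coerced and sorted dict keys, TypeError for unsupported objects) could look like the hypothetical sketch below; it is an illustration, not the module the tests actually import.

def json(value):
    # hypothetical stand-in for the project's json() serializer
    # bool must be checked before int, since True/False are int instances
    if value is None:
        return 'null'
    if value is True:
        return 'true'
    if value is False:
        return 'false'
    if isinstance(value, str):
        return '"%s"' % value.replace('\\', '\\\\').replace('"', '\\"')
    if isinstance(value, (int, float)):
        return str(value)
    if isinstance(value, (list, tuple)):
        return '[%s]' % ','.join(json(item) for item in value)
    if isinstance(value, dict):
        # keys are coerced to strings and emitted in sorted order, as the tests expect
        pairs = sorted((str(key), val) for key, val in value.items())
        return '{%s}' % ','.join('%s:%s' % (json(key), json(val)) for key, val in pairs)
    raise TypeError('cannot serialize %r' % (value,))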
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
try:
from ez_setup import use_setuptools
use_setuptools()
except ImportError:
pass
<|reserved_special_token_0|>
setup(name='django-defaultsite', version='1.1', packages=find_packages(
'src'), package_dir={'': 'src'}, package_data={'': ['LICENSE']},
include_package_data=True, zip_safe=False, author='Oppian System Ltd',
author_email='[email protected]', description=
'django-defaultsiteSets the Site object in django to something better then example.com.'
, license='LICENSE.txt', keywords='django site example.com',
classifiers=['Development Status :: 3 - Alpha',
'Environment :: Web Environment', 'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent', 'Programming Language :: Python',
'Framework :: Django'], url=
'http://oppian.com/labs/django-defaultsite/', long_description=open(
'README.txt').read())
<|reserved_special_token_1|>
<|reserved_special_token_0|>
try:
from ez_setup import use_setuptools
use_setuptools()
except ImportError:
pass
from setuptools import setup, find_packages
setup(name='django-defaultsite', version='1.1', packages=find_packages(
'src'), package_dir={'': 'src'}, package_data={'': ['LICENSE']},
include_package_data=True, zip_safe=False, author='Oppian System Ltd',
author_email='[email protected]', description=
'django-defaultsiteSets the Site object in django to something better then example.com.'
, license='LICENSE.txt', keywords='django site example.com',
classifiers=['Development Status :: 3 - Alpha',
'Environment :: Web Environment', 'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent', 'Programming Language :: Python',
'Framework :: Django'], url=
'http://oppian.com/labs/django-defaultsite/', long_description=open(
'README.txt').read())
<|reserved_special_token_1|>
'''
Created on 5 Mar 2010
@author: oppianmatt
'''
# hook to find setup tools if not installed
try:
from ez_setup import use_setuptools
use_setuptools()
except ImportError:
pass
from setuptools import setup, find_packages
setup(
name = "django-defaultsite",
version = "1.1",
packages = find_packages('src'),
package_dir = {'': 'src'},
package_data={'': ['LICENSE']},
include_package_data=True,
zip_safe=False,
# metadata for upload to PyPI
author = "Oppian System Ltd",
author_email = "[email protected]",
description = "django-defaultsiteSets the Site object in django to something better then example.com.",
license = 'LICENSE.txt',
keywords = "django site example.com",
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
],
url = "http://oppian.com/labs/django-defaultsite/",
long_description=open('README.txt').read(),
)
|
flexible
|
{
"blob_id": "5580e5942370c925b759b09675306cdfbc7dd4f1",
"index": 3633,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n from ez_setup import use_setuptools\n use_setuptools()\nexcept ImportError:\n pass\n<mask token>\nsetup(name='django-defaultsite', version='1.1', packages=find_packages(\n 'src'), package_dir={'': 'src'}, package_data={'': ['LICENSE']},\n include_package_data=True, zip_safe=False, author='Oppian System Ltd',\n author_email='[email protected]', description=\n 'django-defaultsiteSets the Site object in django to something better then example.com.'\n , license='LICENSE.txt', keywords='django site example.com',\n classifiers=['Development Status :: 3 - Alpha',\n 'Environment :: Web Environment', 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent', 'Programming Language :: Python',\n 'Framework :: Django'], url=\n 'http://oppian.com/labs/django-defaultsite/', long_description=open(\n 'README.txt').read())\n",
"step-3": "<mask token>\ntry:\n from ez_setup import use_setuptools\n use_setuptools()\nexcept ImportError:\n pass\nfrom setuptools import setup, find_packages\nsetup(name='django-defaultsite', version='1.1', packages=find_packages(\n 'src'), package_dir={'': 'src'}, package_data={'': ['LICENSE']},\n include_package_data=True, zip_safe=False, author='Oppian System Ltd',\n author_email='[email protected]', description=\n 'django-defaultsiteSets the Site object in django to something better then example.com.'\n , license='LICENSE.txt', keywords='django site example.com',\n classifiers=['Development Status :: 3 - Alpha',\n 'Environment :: Web Environment', 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent', 'Programming Language :: Python',\n 'Framework :: Django'], url=\n 'http://oppian.com/labs/django-defaultsite/', long_description=open(\n 'README.txt').read())\n",
"step-4": "'''\nCreated on 5 Mar 2010\n\n@author: oppianmatt\n'''\n\n# hook to find setup tools if not installed\ntry:\n from ez_setup import use_setuptools\n use_setuptools()\nexcept ImportError:\n pass\n\nfrom setuptools import setup, find_packages\nsetup(\n name = \"django-defaultsite\",\n version = \"1.1\",\n packages = find_packages('src'),\n package_dir = {'': 'src'},\n package_data={'': ['LICENSE']},\n include_package_data=True,\n zip_safe=False,\n \n # metadata for upload to PyPI\n author = \"Oppian System Ltd\",\n author_email = \"[email protected]\",\n description = \"django-defaultsiteSets the Site object in django to something better then example.com.\",\n license = 'LICENSE.txt',\n keywords = \"django site example.com\",\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Framework :: Django',\n ],\n url = \"http://oppian.com/labs/django-defaultsite/\",\n long_description=open('README.txt').read(),\n)\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python3
# coding:utf-8
# Improved little red ball
import random  # used to shuffle the ball's starting horizontal speed
import time    # used to pace the animation loop at the bottom of the file
class Ball:
def __init__(self, canvas, paddle, color):
self.canvas = canvas
self.paddle = paddle
self.id = canvas.create_oval(10, 10, 25, 25, fill=color)
self.canvas.move(self.id, 245, 100)
starts = [-3, -2, -1, 1, 2, 3]
        random.shuffle(starts)  # shuffle starts
self.x = starts[0]
self.y = -3
        self.canvas_height = self.canvas.winfo_height()  # get the canvas height
        self.canvas_width = self.canvas.winfo_width()  # get the canvas width
def draw(self):
self.canvas.move(self.id, self.x, self.y)
        pos = self.canvas.coords(self.id)  # get the ball's current coordinates
if pos[1] <= 0:
self.y = 3
if pos[3] >= self.canvas_height:
self.y = -3
if self.hit_paddle(pos) == True:
self.y = -3
if pos[0] <= 0:
self.x = 3
if pos[2] >= self.canvas_width:
self.x = -3
# add the ball to the main loop
while 1:
ball.draw()
tk.update_idletasks()
tk.update()
time.sleep(0.01)
|
normal
|
{
"blob_id": "cb1e73d172314c8d3d31f6e49fa67582375c0c58",
"index": 7183,
"step-1": "class Ball:\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "class Ball:\n <mask token>\n\n def draw(self):\n self.canvas.move(self.id, self.x, self.y)\n pos = self.canvas.coords(self.id)\n if pos[1] <= 0:\n self.y = 3\n if pos[3] >= self.canvas_height:\n self.y = -3\n if self.hit_paddle(pos) == True:\n self.y = -3\n if pos[0] <= 0:\n self.x = 3\n if pos[2] >= self.canvas_width:\n self.x = -3\n\n\n<mask token>\n",
"step-3": "class Ball:\n\n def __init__(self, canvas, paddle, color):\n self.canvas = canvas\n self.paddle = paddle\n self.id = canvas.create_oval(10, 10, 25, 25, fill=color)\n self.canvas.move(self.id, 245, 100)\n starts = [-3, -2, -1, 1, 2, 3]\n random.shuffle(starts)\n self.x = starts[0]\n self.y = -3\n self.canvas_height = self.canvas.winfo_height()\n self.canvas_width = self.canvas.winfo_width()\n\n def draw(self):\n self.canvas.move(self.id, self.x, self.y)\n pos = self.canvas.coords(self.id)\n if pos[1] <= 0:\n self.y = 3\n if pos[3] >= self.canvas_height:\n self.y = -3\n if self.hit_paddle(pos) == True:\n self.y = -3\n if pos[0] <= 0:\n self.x = 3\n if pos[2] >= self.canvas_width:\n self.x = -3\n\n\n<mask token>\n",
"step-4": "class Ball:\n\n def __init__(self, canvas, paddle, color):\n self.canvas = canvas\n self.paddle = paddle\n self.id = canvas.create_oval(10, 10, 25, 25, fill=color)\n self.canvas.move(self.id, 245, 100)\n starts = [-3, -2, -1, 1, 2, 3]\n random.shuffle(starts)\n self.x = starts[0]\n self.y = -3\n self.canvas_height = self.canvas.winfo_height()\n self.canvas_width = self.canvas.winfo_width()\n\n def draw(self):\n self.canvas.move(self.id, self.x, self.y)\n pos = self.canvas.coords(self.id)\n if pos[1] <= 0:\n self.y = 3\n if pos[3] >= self.canvas_height:\n self.y = -3\n if self.hit_paddle(pos) == True:\n self.y = -3\n if pos[0] <= 0:\n self.x = 3\n if pos[2] >= self.canvas_width:\n self.x = -3\n\n\nwhile 1:\n ball.draw()\n tk.update_idletasks()\n tk.update()\n time.sleep(0.01)\n",
"step-5": "#!/usr/bin/env python3\n# coding:utf-8\n\n# 改进小红球\nclass Ball:\n def __init__(self, canvas, paddle, color):\n self.canvas = canvas\n self.paddle = paddle\n self.id = canvas.create_oval(10, 10, 25, 25, fill=color)\n self.canvas.move(self.id, 245, 100)\n starts = [-3, -2, -1, 1, 2, 3]\n random.shuffle(starts) # 打乱 starts\n self.x = starts[0]\n self.y = -3\n self.canvas_height = self.canvas.winfo_height() # 获取高度坐标\n self.canvas_width = self.canvas.winfo_width() # 获取宽度坐标\n\n def draw(self):\n self.canvas.move(self.id, self.x, self.y)\n pos = self.canvas.coords(self.id) # 获取坐标\n if pos[1] <= 0:\n self.y = 3\n if pos[3] >= self.canvas_height:\n self.y = -3\n if self.hit_paddle(pos) == True:\n self.y = -3\n if pos[0] <= 0:\n self.x = 3\n if pos[2] >= self.canvas_width:\n self.x = -3\n\n# 把小球加入主循环\nwhile 1:\n ball.draw()\n tk.update_idletasks()\n tk.update()\n time.sleep(0.01)\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
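Ball.draw in the record above calls self.hit_paddle, which is never defined in the snippet (the paddle comes from the rest of the original program). A sketch of what that check usually looks like in this classic tkinter exercise is given below; it assumes the paddle object exposes a canvas item id, which is not shown in the record.

def hit_paddle(self, pos):
    # intended as a method of the Ball class above; assumes self.paddle.id
    # is the canvas item id of the paddle rectangle
    paddle_pos = self.canvas.coords(self.paddle.id)
    if pos[2] >= paddle_pos[0] and pos[0] <= paddle_pos[2]:
        # the ball overlaps the paddle horizontally ...
        if paddle_pos[1] <= pos[3] <= paddle_pos[3]:
            # ... and its bottom edge falls within the paddle's vertical extent
            return True
    return False


Ball.hit_paddle = hit_paddle  # attach the method to the Ball class defined above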
from typing import List


class Solution:
def countBits(self, num: int) -> List[int]:
total = []
for i in range(num + 1):
counter = bin(i).count('1')
# for j in bin(i):
# if j == '1':
# counter += 1
total.append(counter)
return total
# bin(i).count('1') is the easy way to do it with built in functions
# for loop to search each char in the returned string is slower
|
normal
|
{
"blob_id": "c6554ff18c23a61d3694e73b808f44c96f9a19c4",
"index": 2012,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def countBits(self, num: int) ->List[int]:\n total = []\n for i in range(num + 1):\n counter = bin(i).count('1')\n total.append(counter)\n return total\n",
"step-4": "class Solution:\n def countBits(self, num: int) -> List[int]:\n total = []\n for i in range(num + 1):\n counter = bin(i).count('1')\n # for j in bin(i):\n # if j == '1':\n # counter += 1\n total.append(counter)\n \n return total\n \n # bin(i).count('1') is the easy way to do it with built in functions\n # for loop to search each char in the returned string is slower\n \n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
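The comment in the record above notes that bin(i).count('1') is the simplest built-in approach. For completeness, a common alternative is the O(n) dynamic-programming formulation, where each count reuses the count of the number with its lowest bit shifted out; the sketch below is an illustration, not part of the original submission.

from typing import List


class SolutionDP:
    def countBits(self, num: int) -> List[int]:
        # counts[i] = counts[i >> 1] + lowest bit of i
        counts = [0] * (num + 1)
        for i in range(1, num + 1):
            counts[i] = counts[i >> 1] + (i & 1)
        return counts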
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('lots', '0012_auto_20200425_1720')]
operations = [migrations.AlterField(model_name='lots', name='photo',
field=models.ImageField(default='images/default.png', upload_to=
lots.models.path_and_rename))]
<|reserved_special_token_1|>
from django.db import migrations, models
import lots.models
class Migration(migrations.Migration):
dependencies = [('lots', '0012_auto_20200425_1720')]
operations = [migrations.AlterField(model_name='lots', name='photo',
field=models.ImageField(default='images/default.png', upload_to=
lots.models.path_and_rename))]
<|reserved_special_token_1|>
# Generated by Django 3.0.5 on 2020-04-25 15:35
from django.db import migrations, models
import lots.models
class Migration(migrations.Migration):
dependencies = [
('lots', '0012_auto_20200425_1720'),
]
operations = [
migrations.AlterField(
model_name='lots',
name='photo',
field=models.ImageField(default='images/default.png', upload_to=lots.models.path_and_rename),
),
]
|
flexible
|
{
"blob_id": "b36f3ffed888edaa7716f712f1549dc205799caf",
"index": 6338,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('lots', '0012_auto_20200425_1720')]\n operations = [migrations.AlterField(model_name='lots', name='photo',\n field=models.ImageField(default='images/default.png', upload_to=\n lots.models.path_and_rename))]\n",
"step-4": "from django.db import migrations, models\nimport lots.models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('lots', '0012_auto_20200425_1720')]\n operations = [migrations.AlterField(model_name='lots', name='photo',\n field=models.ImageField(default='images/default.png', upload_to=\n lots.models.path_and_rename))]\n",
"step-5": "# Generated by Django 3.0.5 on 2020-04-25 15:35\n\nfrom django.db import migrations, models\nimport lots.models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('lots', '0012_auto_20200425_1720'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='lots',\n name='photo',\n field=models.ImageField(default='images/default.png', upload_to=lots.models.path_and_rename),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
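The migration above points upload_to at lots.models.path_and_rename, which is not included in this record. A typical upload_to callable with that name might look like the hypothetical sketch below; the uuid-based naming and the images/ prefix are assumptions, and only the function name and the standard (instance, filename) signature are implied by the migration.

import os
import uuid


def path_and_rename(instance, filename):
    # hypothetical helper: keep the original extension and store under images/
    # with a collision-proof name; `instance` is the model instance Django passes in
    ext = filename.rsplit('.', 1)[-1]
    return os.path.join('images', '{}.{}'.format(uuid.uuid4().hex, ext))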
y_true = [7, 3, 3, 4, 9, 9, 2, 5, 0, 0, 6, 3, 1, 6, 8, 7, 9, 7, 4, 2, 0, 1,
4, 1, 7, 7, 5, 0, 8, 0, 1, 7, 4, 2, 2, 4, 9, 3, 1, 7, 1, 2, 1, 7, 5, 9,
9, 4, 8, 5, 7, 2, 7, 5, 5, 6, 6, 1, 2, 6, 6, 5, 3, 2, 3, 8, 8, 8, 8, 5,
3, 4, 3, 2, 8, 1, 9, 0, 6, 8, 6, 1, 1, 1, 5, 4, 8, 8, 5, 5, 8, 6, 4, 4,
6, 9, 8, 1, 5, 5]
y_pred_prob = [[0.0597563199698925, 0.1344364434480667, 0.1173347756266594,
0.11292721331119537, 0.10652001202106476, 0.13155865669250488,
0.10057594627141953, 0.10029518604278564, 0.10313529521226883,
0.03346000984311104], [0.0002930850023403764, 0.23393571376800537,
0.09061524271965027, 0.21862193942070007, 0.04659481346607208,
0.04461496323347092, 0.0952368974685669, 0.2075100988149643,
0.0616493821144104, 0.0009278177167288959], [0.22330643236637115,
1.0582012919257977e-06, 0.22777651250362396, 0.20880192518234253,
9.877869615593227e-07, 0.0006437229458242655, 0.1556401550769806,
7.201562368663872e-08, 0.18382851779460907, 5.064675860921852e-07], [
1.7682419638731517e-05, 0.001197152421809733, 0.015430454164743423,
0.0037515582516789436, 0.32882484793663025, 0.0003495111595839262,
0.012810198590159416, 0.054448556154966354, 0.30387693643569946,
0.27929291129112244], [0.16070464253425598, 4.810986276027052e-09,
0.15206283330917358, 0.004463076591491699, 0.1652054488658905,
0.0038724008481949568, 0.17216043174266815, 0.13407163321971893,
0.029512932524085045, 0.17794682085514069], [0.10922636836767197,
2.2864300319724862e-07, 0.11546860635280609, 0.001813476555980742,
0.1788507103919983, 0.005888130981475115, 0.18413811922073364,
0.10866158455610275, 0.10712066292762756, 0.18883220851421356], [
0.005557563621550798, 0.0001692363148322329, 0.35343053936958313,
0.0015008420450612903, 0.00037875055568292737, 0.2150292843580246,
0.014169459231197834, 0.03244209289550781, 0.33539846539497375,
0.041923996061086655], [0.193454310297966, 3.662989183794707e-05,
0.10065275430679321, 0.00039752188604325056, 0.16119857132434845,
0.19390884041786194, 0.07022294402122498, 0.02460072562098503,
0.16083283722400665, 0.0946948304772377], [0.28058794140815735,
1.1208027217435301e-06, 0.018203848972916603, 0.16030532121658325,
0.00018859952979255468, 0.21325571835041046, 0.2328961044549942,
0.007604319602251053, 0.04473938047885895, 0.04221738502383232], [
0.1718112975358963, 7.514636672567576e-05, 0.15386143326759338,
0.008414546959102154, 0.001738831982947886, 0.15720322728157043,
0.17100712656974792, 0.15586316585540771, 0.104509636759758,
0.07551562041044235], [0.001471314812079072, 0.008587654680013657,
0.0367623046040535, 0.011750160716474056, 0.07068527489900589,
0.4173307418823242, 0.12449752539396286, 0.014547907747328281,
0.2990296185016632, 0.01533727627247572], [0.005052714608609676,
0.0073812128975987434, 0.009834956377744675, 0.33292853832244873,
0.0018518454162403941, 0.0015299966325983405, 0.002040529390797019,
0.3055168688297272, 0.32741934061050415, 0.006443792954087257], [
0.0011697597801685333, 0.20749542117118835, 0.07009387016296387,
0.08994801342487335, 0.09965154528617859, 0.060963381081819534,
0.13158728182315826, 0.1365581601858139, 0.11990636587142944,
0.08262615650892258], [0.020798824727535248, 1.469431822442857e-06,
0.016172533854842186, 0.021048342809081078, 0.009139545261859894,
0.3956705331802368, 0.3814408779144287, 7.980810551089235e-06,
0.1391601711511612, 0.016559595242142677], [0.0008747534011490643,
0.0009511907119303942, 0.055323366075754166, 0.05426914989948273,
0.03363798186182976, 0.12827005982398987, 0.03197509050369263,
0.0008451330941170454, 0.37859639525413513, 0.3152569532394409], [
0.001832291018217802, 9.253426833311096e-05, 0.27192848920822144,
0.18078717589378357, 0.004130060318857431, 0.00929891224950552,
0.1695500910282135, 0.29965919256210327, 0.020460698753595352,
0.042260222136974335], [0.15259969234466553, 0.00015921871818136424,
0.16849327087402344, 0.002068838570266962, 0.17735524475574493,
0.02342645265161991, 0.18245863914489746, 0.00010533139720791951,
0.11123484373092651, 0.1820984184741974], [0.18936939537525177,
1.7293215250901994e-06, 0.029253976419568062, 0.1424887329339981,
0.01099975686520338, 0.0074686696752905846, 0.053486552089452744,
0.2111600935459137, 0.14551354944705963, 0.21025745570659637], [
3.861714503727853e-05, 0.1669524759054184, 0.00032175786327570677,
0.15850232541561127, 0.1955566704273224, 0.012984608300030231,
0.14730143547058105, 0.066555455327034, 0.1175893247127533,
0.13419757783412933], [0.1504199206829071, 0.006808706559240818,
0.22468900680541992, 0.18946652114391327, 1.2391226846375503e-05,
0.10332755744457245, 0.15032899379730225, 2.30663204092707e-06,
0.17487214505672455, 7.243863365147263e-05], [0.23918452858924866,
5.279692683046733e-09, 0.0671931579709053, 0.2041931003332138,
9.380520350532606e-05, 0.18892300128936768, 0.16166524589061737,
1.2340686907919007e-06, 0.1280936300754547, 0.010652361437678337], [
0.0019602354150265455, 0.17319674789905548, 0.16884981095790863,
0.025876348838210106, 0.11373495310544968, 0.034116633236408234,
0.09377618134021759, 0.16857513785362244, 0.10720878094434738,
0.11270517110824585], [0.006008224096149206, 7.275425741681829e-05,
0.002679133554920554, 0.005456522107124329, 0.2852444648742676,
0.007294526789337397, 0.26774612069129944, 0.0033797386568039656,
0.15357472002506256, 0.26854372024536133], [0.0020487161818891764,
0.18302913010120392, 0.17970730364322662, 0.03157859668135643,
0.10424197465181351, 0.028137331828475, 0.049388039857149124,
0.17323219776153564, 0.13171784579753876, 0.11691895872354507], [
0.011249794624745846, 0.0003711018362082541, 0.32693105936050415,
0.0010822461917996407, 0.0076926033943891525, 0.04566335678100586,
0.005700047593563795, 0.32916736602783203, 0.09476791322231293,
0.17737449705600739], [0.0001925578253576532, 7.067231763357995e-06,
0.0001896199828479439, 0.09954455494880676, 0.23005598783493042,
0.2152310460805893, 0.09002267569303513, 0.017976609990000725,
0.0920918807387352, 0.25468799471855164], [0.0006383731961250305,
3.095208057857235e-06, 0.0005969868507236242, 0.41469672322273254,
0.0053739529103040695, 0.40698617696762085, 0.08218759298324585,
0.0003528161614667624, 0.07473969459533691, 0.014424380846321583], [
0.19537049531936646, 3.243912300235352e-13, 0.005169959273189306,
0.17694340646266937, 2.949438930954784e-05, 0.1400780826807022,
0.18864554166793823, 3.857006959151477e-06, 0.18823771178722382,
0.10552132874727249], [0.009722508490085602, 3.8531984500878025e-06,
0.07383214682340622, 0.03598225489258766, 0.07267675548791885,
0.1459459662437439, 0.07249364256858826, 0.002293274737894535,
0.48588359355926514, 0.1011660099029541], [0.21651780605316162,
9.559274261050632e-09, 0.14371894299983978, 0.13431811332702637,
2.7394575226935558e-05, 0.1838626116514206, 0.17265450954437256,
0.00012304158008191735, 0.12219242751598358, 0.0265849307179451], [
4.430914850672707e-05, 0.2043066918849945, 0.0002825123374350369,
0.16263452172279358, 0.1939067542552948, 0.1427866667509079,
0.11921370774507523, 0.0028419536538422108, 0.06556723266839981,
0.10841585695743561], [0.004471424967050552, 0.1858968585729599,
0.17653658986091614, 0.01416453905403614, 0.008144107647240162,
0.0843614935874939, 0.05890577659010887, 0.18505530059337616,
0.10232891887426376, 0.18013498187065125], [0.00041712025995366275,
1.1021310228898074e-06, 0.08412905037403107, 0.0002837374631781131,
0.2740859091281891, 0.013903344981372356, 0.08929961919784546,
0.2733091115951538, 0.2233879268169403, 0.04118315503001213], [
0.04552318528294563, 0.020853176712989807, 0.26410210132598877,
0.23437173664569855, 2.1701146124541992e-06, 0.10220374912023544,
0.07447297871112823, 7.592303154524416e-05, 0.25814488530158997,
0.00025002588517963886], [0.024719374254345894, 0.00217414740473032,
0.26734668016433716, 0.17261573672294617, 0.003498602891340852,
0.05698162689805031, 0.2737174332141876, 8.039058593567461e-05,
0.19880186021327972, 6.410985952243209e-05], [0.12234598398208618,
6.703280632791575e-06, 0.015603234991431236, 0.013786871917545795,
0.21616478264331818, 0.005412149243056774, 0.11406012624502182,
0.12291428446769714, 0.18262456357479095, 0.20708128809928894], [
0.193313866853714, 6.033819488493464e-08, 0.14491458237171173,
0.2349807769060135, 0.0006736826617270708, 0.003743150969967246,
0.12457092851400375, 0.004962997976690531, 0.23268520832061768,
0.060154590755701065], [0.006641837302595377, 0.005113706924021244,
0.060135774314403534, 0.37294134497642517, 0.0001917753543239087,
0.35536521673202515, 0.003515040036290884, 0.00014136293611954898,
0.19584619998931885, 0.00010780058073578402], [0.00022568553686141968,
0.1758676916360855, 0.08169379830360413, 0.11927571147680283,
0.14987629652023315, 0.026822827756404877, 0.09613550454378128,
0.14441852271556854, 0.11029191315174103, 0.09539227187633514], [
0.028152454644441605, 0.04798303544521332, 0.06989692151546478,
0.07051544636487961, 0.07356826215982437, 0.05468234792351723,
0.11397064477205276, 0.2294078767299652, 0.0822836384177208,
0.22953952848911285], [0.0009083361364901066, 0.16873282194137573,
0.040142301470041275, 0.13509070873260498, 0.16045929491519928,
0.09148524701595306, 0.0939648225903511, 0.13889746367931366,
0.043392572551965714, 0.12692658603191376], [7.008769898675382e-05,
0.0012455701362341642, 0.4437786936759949, 0.03154001384973526,
0.0033613061532378197, 0.0024434190709143877, 0.3866567313671112,
0.0005211094976402819, 0.13020911812782288, 0.00017409549036528915], [
0.00034864526242017746, 0.21021592617034912, 0.005514794960618019,
0.11704950034618378, 0.08421261608600616, 0.13176649808883667,
0.11882488429546356, 0.008054501377046108, 0.1467529684305191,
0.1772596538066864], [0.036879003047943115, 0.0014911789912730455,
0.2685071527957916, 0.0029583016876131296, 0.011879128403961658,
0.030892902985215187, 0.08989892154932022, 0.29645001888275146,
0.04054954648017883, 0.2204938679933548], [0.0064177061431109905,
0.0045189931988716125, 0.013788403943181038, 0.18153700232505798,
0.0003662402159534395, 0.5257023572921753, 0.06426692008972168,
9.742573638504837e-06, 0.2026320844888687, 0.000760772149078548], [
0.0017538872780278325, 0.0002046643348876387, 0.04638877511024475,
0.11219469457864761, 0.1732793003320694, 0.000888414157088846,
0.1527005136013031, 0.171849325299263, 0.16653017699718475,
0.17421048879623413], [6.957617006264627e-05, 3.015168840647675e-05,
0.05601977929472923, 0.06104991212487221, 0.14622464776039124,
0.0013683908618986607, 0.004713970702141523, 0.26153290271759033,
0.21816983819007874, 0.25082090497016907], [0.001964711584150791,
0.14094221591949463, 0.04670453444123268, 0.11537310481071472,
0.1456061750650406, 0.021807175129652023, 0.1023702397942543,
0.14592182636260986, 0.1320936679840088, 0.14721626043319702], [
0.0013557883212342858, 5.542307803807489e-07, 0.015518834814429283,
0.020929962396621704, 0.12795883417129517, 0.012969551607966423,
0.011510342359542847, 0.3424086570739746, 0.3332746922969818,
0.1340728998184204], [0.0951327458024025, 0.03636496141552925,
0.018829435110092163, 0.060135968029499054, 0.1569897085428238,
0.1514764130115509, 0.13258931040763855, 0.1450430303812027,
0.04603665694594383, 0.15740196406841278], [0.17052830755710602,
1.5615187294315547e-06, 0.0013229812029749155, 0.12005076557397842,
0.021564221009612083, 0.024421295151114464, 0.17088675498962402,
0.15222683548927307, 0.1693890392780304, 0.16960804164409637], [
0.006946968380361795, 0.3011370897293091, 0.3187958002090454,
0.06604688614606857, 0.011190904304385185, 0.05437859520316124,
0.020502492785453796, 0.010224146768450737, 0.21062366664409637,
0.00015340560639742762], [0.003341993084177375, 0.0016007163794711232,
0.0007675797096453607, 0.18986503779888153, 0.1190534457564354,
0.02811228297650814, 0.09639428555965424, 0.21583504974842072,
0.13505271077156067, 0.2099769562482834], [0.042331017553806305,
0.00029962626285851, 0.0023094473872333765, 0.18676534295082092,
0.000317152967909351, 0.48982951045036316, 0.1871659755706787,
8.205944141082e-06, 0.09039845317602158, 0.0005752819124609232], [
0.27066469192504883, 0.0001488085399614647, 0.025224560871720314,
0.03236522525548935, 0.00022321399592328817, 0.3199988305568695,
0.20726615190505981, 2.1540354282478802e-05, 0.13308577239513397,
0.011001424863934517], [0.21046556532382965, 8.32586906085453e-08,
0.050842639058828354, 0.0012313498882576823, 0.17998859286308289,
0.005802170839160681, 0.22032563388347626, 9.771327313501388e-06,
0.2085702270269394, 0.12276387959718704], [0.278763085603714,
2.956639932882865e-10, 0.2363770455121994, 0.0021949675865471363,
0.024400619789958, 0.01081052329391241, 0.2788945734500885,
0.000592902593780309, 0.09800171107053757, 0.06996453553438187], [
0.0012440741993486881, 0.0002501744020264596, 0.039189230650663376,
0.003109667217358947, 0.1353403925895691, 0.17648975551128387,
0.29823172092437744, 0.0005026640137657523, 0.1873668134212494,
0.15827545523643494], [4.636057929019444e-05, 0.004471238702535629,
0.010865537449717522, 0.03406133875250816, 0.2391168773174286,
0.0102084307000041, 0.24508318305015564, 0.10957624763250351,
0.10304577648639679, 0.24352511763572693], [0.007771539501845837,
0.003819737583398819, 0.05605701357126236, 0.0013185413554310799,
0.026425426825881004, 0.37273845076560974, 0.39364394545555115,
3.468452996457927e-05, 0.13644644618034363, 0.0017443000106140971], [
0.0042862421832978725, 4.118454022261631e-09, 0.24541069567203522,
1.311416235694196e-05, 0.002639196580275893, 0.2002275139093399,
0.35612747073173523, 8.159701246768236e-05, 0.11912810802459717,
0.07208611816167831], [0.10790199786424637, 0.00018712706514634192,
0.001723292050883174, 0.3369658291339874, 0.005216643214225769,
0.323357492685318, 0.04629630222916603, 0.0006358266109600663,
0.17700347304344177, 0.0007120332447811961], [0.01004449650645256,
0.0038342783227562904, 0.0029477709904313087, 0.39860454201698303,
0.000900272571016103, 0.32782217860221863, 0.010686549358069897,
0.0006012170924805105, 0.23407192528247833, 0.010486727580428123], [
0.0015078516444191337, 0.23596949875354767, 0.4038705825805664,
0.04463784024119377, 0.00036313795135356486, 0.005906661506742239,
0.012559221126139164, 0.010579549707472324, 0.2843676507472992,
0.0002381248341407627], [0.1887362003326416, 0.0019065006636083126,
0.2840288579463959, 0.2984219193458557, 4.9067231884691864e-05,
0.1615515947341919, 0.012938770465552807, 0.00029289082158356905,
0.052058152854442596, 1.6269357729470357e-05], [0.0006827416946180165,
2.276465056638699e-05, 0.023704057559370995, 0.16121432185173035,
0.0033186341170221567, 0.004117893520742655, 0.03627816215157509,
0.009822812862694263, 0.7281517386436462, 0.032687313854694366], [
0.0011369712883606553, 0.27387163043022156, 0.07185991108417511,
0.15628814697265625, 0.002854800783097744, 0.23154565691947937,
0.03204796463251114, 0.003870188258588314, 0.22623319923877716,
0.00029159500263631344], [0.0035695999395102262, 0.26706114411354065,
0.1508740484714508, 0.0013921442441642284, 0.019328434020280838,
0.13771453499794006, 0.029891734942793846, 0.03509771451354027,
0.24692872166633606, 0.1081417053937912], [0.000882012362126261,
2.536918327677995e-05, 0.0450599268078804, 0.412322998046875,
0.0025211411993950605, 0.002278776839375496, 0.011372447945177555,
0.1770726591348648, 0.33388030529022217, 0.014584112912416458], [
0.21903501451015472, 5.910552047794226e-09, 0.022012481465935707,
0.20099963247776031, 1.0874355211853981e-05, 0.21909210085868835,
0.21668335795402527, 4.337367798257219e-08, 0.12212178856134415,
4.4732783862855285e-05], [0.014651631936430931, 0.00830799899995327,
0.005935078486800194, 0.3953670263290405, 1.1293817806290463e-05,
0.4299878776073456, 0.017106691375374794, 0.00014334742445498705,
0.11808823049068451, 0.010400976054370403], [0.010301091708242893,
0.01435689628124237, 0.07430031895637512, 0.06989920139312744,
0.2338510900735855, 0.053795550018548965, 0.22257547080516815,
0.0029012206941843033, 0.09203658252954483, 0.22598253190517426], [
0.033016644418239594, 0.0020125852897763252, 0.06661045551300049,
0.4920836091041565, 0.00025867935619316995, 0.07482428848743439,
0.13923810422420502, 0.00012527030776254833, 0.19180776178836823,
2.269313517899718e-05], [0.1325867474079132, 0.004940022714436054,
0.22300080955028534, 0.2727201282978058, 3.310650572529994e-05,
0.12915031611919403, 0.01339033618569374, 1.0927167750196531e-05,
0.22410929203033447, 5.8520683523966e-05], [0.126132994890213,
0.0013935434399172664, 0.17098797857761383, 0.00039779843064025044,
0.07732491940259933, 0.16493096947669983, 0.014501826837658882,
0.03405503183603287, 0.20594964921474457, 0.2043251097202301], [
0.0008475463255308568, 0.19114449620246887, 0.03174148499965668,
0.1596948355436325, 0.1830475926399231, 0.11398201435804367,
0.11080365628004074, 0.10536272078752518, 0.05745834857225418,
0.04591764137148857], [0.0009525367058813572, 0.0012388192117214203,
0.0006522738258354366, 0.15977761149406433, 0.2019728273153305,
0.037797972559928894, 0.19880010187625885, 0.008799873292446136,
0.18693988025188446, 0.20306788384914398], [0.21417981386184692,
1.8215121144748991e-07, 0.11546390503644943, 0.10518436878919601,
5.3784842748427764e-05, 0.17964830994606018, 0.1753360480070114,
0.005312803667038679, 0.07569659501314163, 0.1291242241859436], [
0.03322113677859306, 1.1228289409359604e-08, 0.11529551446437836,
0.006697801407426596, 0.020004654303193092, 0.2904326617717743,
0.3397071361541748, 6.173769179440569e-06, 0.1187906265258789,
0.07584403455257416], [0.00018722846289165318, 0.00015633362636435777,
0.027305739000439644, 0.30433472990989685, 0.12216899544000626,
0.0051543135195970535, 0.07717369496822357, 5.6467473768861964e-05,
0.46220865845680237, 0.0012535307323560119], [0.2223890870809555,
1.8010264568601997e-07, 0.051188305020332336, 0.06915734708309174,
0.007792292162775993, 0.13037307560443878, 0.4795873761177063,
6.65841726004146e-05, 0.03377178683876991, 0.0056741489097476006], [
0.0011432061437517405, 0.172257199883461, 0.08959532529115677,
0.09976792335510254, 0.13487820327281952, 0.025573352351784706,
0.11224105209112167, 0.1427890509366989, 0.12529729306697845,
0.09645748883485794], [0.00039081714930944145, 0.17529502511024475,
0.07816692441701889, 0.12808731198310852, 0.13959045708179474,
0.04451143741607666, 0.07863735407590866, 0.1518080085515976,
0.09225541353225708, 0.11125729233026505], [0.0005360758514143527,
0.1871286779642105, 0.09343081712722778, 0.10187795013189316,
0.15403643250465393, 0.03745483607053757, 0.10108820348978043,
0.1381213515996933, 0.1196260005235672, 0.0666997954249382], [
0.02377643622457981, 0.002874232828617096, 0.06835681945085526,
0.08628982305526733, 0.16734763979911804, 0.1884264051914215,
0.06887176632881165, 0.1883554309606552, 0.11966855823993683,
0.0860329195857048], [0.0019290593918412924, 0.0004132240719627589,
0.08087942749261856, 0.00133050128351897, 0.2057691514492035,
0.014698517508804798, 0.10668473690748215, 0.2002524882555008,
0.19643288850784302, 0.19160999357700348], [4.1589693864807487e-05,
3.0074079404585063e-06, 0.00946643017232418, 0.0028675245121121407,
0.339987188577652, 0.006530506536364555, 0.21062259376049042,
5.006019819120411e-06, 0.4303286373615265, 0.00014742799976374954], [
0.23467645049095154, 3.957170217048535e-14, 0.016559595242142677,
0.22702592611312866, 0.0004185910802334547, 0.0031147561967372894,
0.2260916531085968, 2.4497327899553056e-07, 0.2333890199661255,
0.05872354656457901], [0.1723964959383011, 1.4810979109824984e-07,
0.001400468056090176, 0.3012116253376007, 0.00017689657397568226,
0.29611334204673767, 0.013564502820372581, 0.04992862418293953,
0.15185707807540894, 0.013350787572562695], [0.18757264316082,
1.502647393181178e-07, 0.0013043361250311136, 0.08373606950044632,
0.0005724140792153776, 0.1799388974905014, 0.14538954198360443,
0.16594813764095306, 0.06483398377895355, 0.17070381343364716], [
0.008307700976729393, 0.0005032537155784667, 0.04173918813467026,
0.055757056921720505, 0.2954571545124054, 0.046274807304143906,
0.15145555138587952, 0.00160416669677943, 0.36763912439346313,
0.031262170523405075], [0.03202534094452858, 2.929154447883775e-07,
0.03331722691655159, 0.0002443870762363076, 0.021324075758457184,
0.3864181637763977, 0.39420267939567566, 3.2187076612899546e-06,
0.08215467631816864, 0.050310224294662476], [0.03041147254407406,
3.317395247393051e-10, 0.013215649873018265, 0.009000282734632492,
0.15260590612888336, 9.569835674483329e-05, 0.22718068957328796,
0.0983223170042038, 0.23328886926174164, 0.23587895929813385], [
0.0017376767937093973, 0.01800091378390789, 0.09461784362792969,
0.008886604569852352, 0.23299837112426758, 0.03532419353723526,
0.20058980584144592, 0.1702878624200821, 0.06943482160568237,
0.1681220531463623], [0.26592451333999634, 1.378083283043452e-07,
0.26663097739219666, 0.00043869472574442625, 0.0753256231546402,
0.000345755455782637, 0.2718716561794281, 0.09590824693441391,
0.021168876439332962, 0.0023856020998209715], [0.007719929795712233,
0.000273746729362756, 0.06954099237918854, 0.11292484402656555,
0.17693056166172028, 0.0036023242864757776, 0.16335690021514893,
0.1139131560921669, 0.17289915680885315, 0.17883846163749695], [
0.0002722161589190364, 0.0014734293799847364, 0.0001780118327587843,
0.0718056932091713, 0.219150573015213, 0.02937471494078636,
0.15243956446647644, 0.07647080719470978, 0.21917390823364258,
0.22966115176677704], [0.0008591399528086185, 0.27216723561286926,
0.030793067067861557, 0.040201541036367416, 0.07587726414203644,
0.06215333193540573, 0.16188929975032806, 0.04154059290885925,
0.21999017894268036, 0.09452840685844421], [0.156771719455719,
0.0009459690772928298, 0.08676373958587646, 0.012071664445102215,
0.046294376254081726, 0.1705559939146042, 0.05631829798221588,
0.16554586589336395, 0.14995504915714264, 0.15477733314037323], [
0.0036007703747600317, 0.0036146841011941433, 0.007429149001836777,
0.10190737992525101, 0.0016259902622550726, 0.45585712790489197,
0.04189519211649895, 7.317630092984473e-07, 0.3802386522293091,
0.003830441040918231]]
|
normal
|
{
"blob_id": "593d3221e34c0eef51228082d767d8516ec93ca2",
"index": 8002,
"step-1": "<mask token>\n",
"step-2": "y_true = [7, 3, 3, 4, 9, 9, 2, 5, 0, 0, 6, 3, 1, 6, 8, 7, 9, 7, 4, 2, 0, 1,\n 4, 1, 7, 7, 5, 0, 8, 0, 1, 7, 4, 2, 2, 4, 9, 3, 1, 7, 1, 2, 1, 7, 5, 9,\n 9, 4, 8, 5, 7, 2, 7, 5, 5, 6, 6, 1, 2, 6, 6, 5, 3, 2, 3, 8, 8, 8, 8, 5,\n 3, 4, 3, 2, 8, 1, 9, 0, 6, 8, 6, 1, 1, 1, 5, 4, 8, 8, 5, 5, 8, 6, 4, 4,\n 6, 9, 8, 1, 5, 5]\ny_pred_prob = [[0.0597563199698925, 0.1344364434480667, 0.1173347756266594,\n 0.11292721331119537, 0.10652001202106476, 0.13155865669250488, \n 0.10057594627141953, 0.10029518604278564, 0.10313529521226883, \n 0.03346000984311104], [0.0002930850023403764, 0.23393571376800537, \n 0.09061524271965027, 0.21862193942070007, 0.04659481346607208, \n 0.04461496323347092, 0.0952368974685669, 0.2075100988149643, \n 0.0616493821144104, 0.0009278177167288959], [0.22330643236637115, \n 1.0582012919257977e-06, 0.22777651250362396, 0.20880192518234253, \n 9.877869615593227e-07, 0.0006437229458242655, 0.1556401550769806, \n 7.201562368663872e-08, 0.18382851779460907, 5.064675860921852e-07], [\n 1.7682419638731517e-05, 0.001197152421809733, 0.015430454164743423, \n 0.0037515582516789436, 0.32882484793663025, 0.0003495111595839262, \n 0.012810198590159416, 0.054448556154966354, 0.30387693643569946, \n 0.27929291129112244], [0.16070464253425598, 4.810986276027052e-09, \n 0.15206283330917358, 0.004463076591491699, 0.1652054488658905, \n 0.0038724008481949568, 0.17216043174266815, 0.13407163321971893, \n 0.029512932524085045, 0.17794682085514069], [0.10922636836767197, \n 2.2864300319724862e-07, 0.11546860635280609, 0.001813476555980742, \n 0.1788507103919983, 0.005888130981475115, 0.18413811922073364, \n 0.10866158455610275, 0.10712066292762756, 0.18883220851421356], [\n 0.005557563621550798, 0.0001692363148322329, 0.35343053936958313, \n 0.0015008420450612903, 0.00037875055568292737, 0.2150292843580246, \n 0.014169459231197834, 0.03244209289550781, 0.33539846539497375, \n 0.041923996061086655], [0.193454310297966, 3.662989183794707e-05, \n 0.10065275430679321, 0.00039752188604325056, 0.16119857132434845, \n 0.19390884041786194, 0.07022294402122498, 0.02460072562098503, \n 0.16083283722400665, 0.0946948304772377], [0.28058794140815735, \n 1.1208027217435301e-06, 0.018203848972916603, 0.16030532121658325, \n 0.00018859952979255468, 0.21325571835041046, 0.2328961044549942, \n 0.007604319602251053, 0.04473938047885895, 0.04221738502383232], [\n 0.1718112975358963, 7.514636672567576e-05, 0.15386143326759338, \n 0.008414546959102154, 0.001738831982947886, 0.15720322728157043, \n 0.17100712656974792, 0.15586316585540771, 0.104509636759758, \n 0.07551562041044235], [0.001471314812079072, 0.008587654680013657, \n 0.0367623046040535, 0.011750160716474056, 0.07068527489900589, \n 0.4173307418823242, 0.12449752539396286, 0.014547907747328281, \n 0.2990296185016632, 0.01533727627247572], [0.005052714608609676, \n 0.0073812128975987434, 0.009834956377744675, 0.33292853832244873, \n 0.0018518454162403941, 0.0015299966325983405, 0.002040529390797019, \n 0.3055168688297272, 0.32741934061050415, 0.006443792954087257], [\n 0.0011697597801685333, 0.20749542117118835, 0.07009387016296387, \n 0.08994801342487335, 0.09965154528617859, 0.060963381081819534, \n 0.13158728182315826, 0.1365581601858139, 0.11990636587142944, \n 0.08262615650892258], [0.020798824727535248, 1.469431822442857e-06, \n 0.016172533854842186, 0.021048342809081078, 0.009139545261859894, \n 0.3956705331802368, 0.3814408779144287, 7.980810551089235e-06, \n 0.1391601711511612, 0.016559595242142677], [0.0008747534011490643, \n 
[... tail of a long 10-column matrix of floating-point values omitted ...]]\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
# -*- coding: utf-8 -*-
# @COPYRIGHT_begin
#
# Copyright [2015] Michał Szczygieł, M4GiK Software
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @COPYRIGHT_end
from django import template
register = template.Library()
@register.inclusion_tag('tags/fieldsetForm.html')
def show_fieldsetform(form):
"""
Renders given form without marking required fields.
@param form:
@return:
"""
return {'form': form, 'required_fields': True}
@register.inclusion_tag('tags/fieldsetForm.html')
def show_fieldsetform_nrf(form):
"""
Renders given form with required fields marked.
@param form:
@return:
"""
return {'form': form, 'required_fields': False}
@register.inclusion_tag('tags/sendForm.html')
def show_sendform(form):
"""
Renders given form without marking required fields.
@param form:
@return:
"""
return {'form': form, 'required_fields': True}
@register.inclusion_tag('tags/loginForm.html')
def show_loginform(form):
"""
Renders given form without marking required fields.
@param form:
@return:
"""
return {'form': form, 'required_fields': True}
@register.inclusion_tag('tags/accountForm.html')
def show_accountform(form, is_superuser):
"""
Renders given form without marking required fields.
@param form:
@return:
"""
return {'form': form, 'required_fields': False, 'is_superuser': is_superuser}
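# Usage sketch (not taken from this project's templates): a template loads the
# tag library by this module's file name -- not shown here -- and calls a tag
# with a form from the view context, e.g.
#
#     {% load <this_tag_module> %}
#     {% show_loginform form %}
#
# which renders tags/loginForm.html with the dictionary returned above as its
# context.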
|
normal
|
{
"blob_id": "9f2105d188ac32a9eef31b21065e9bda13a02995",
"index": 6735,
"step-1": "<mask token>\n\n\[email protected]_tag('tags/fieldsetForm.html')\ndef show_fieldsetform(form):\n \"\"\"\n Renders given form without marking required fields.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': True}\n\n\[email protected]_tag('tags/fieldsetForm.html')\ndef show_fieldsetform_nrf(form):\n \"\"\"\n Renders given form with required fields marked.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': False}\n\n\[email protected]_tag('tags/sendForm.html')\ndef show_sendform(form):\n \"\"\"\n Renders given form without marking required fields.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': True}\n\n\n<mask token>\n\n\[email protected]_tag('tags/accountForm.html')\ndef show_accountform(form, is_superuser):\n \"\"\"\n Renders given form without marking required fields.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': False, 'is_superuser':\n is_superuser}\n",
"step-2": "<mask token>\n\n\[email protected]_tag('tags/fieldsetForm.html')\ndef show_fieldsetform(form):\n \"\"\"\n Renders given form without marking required fields.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': True}\n\n\[email protected]_tag('tags/fieldsetForm.html')\ndef show_fieldsetform_nrf(form):\n \"\"\"\n Renders given form with required fields marked.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': False}\n\n\[email protected]_tag('tags/sendForm.html')\ndef show_sendform(form):\n \"\"\"\n Renders given form without marking required fields.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': True}\n\n\[email protected]_tag('tags/loginForm.html')\ndef show_loginform(form):\n \"\"\"\n Renders given form without marking required fields.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': True}\n\n\[email protected]_tag('tags/accountForm.html')\ndef show_accountform(form, is_superuser):\n \"\"\"\n Renders given form without marking required fields.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': False, 'is_superuser':\n is_superuser}\n",
"step-3": "<mask token>\nregister = template.Library()\n\n\[email protected]_tag('tags/fieldsetForm.html')\ndef show_fieldsetform(form):\n \"\"\"\n Renders given form without marking required fields.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': True}\n\n\[email protected]_tag('tags/fieldsetForm.html')\ndef show_fieldsetform_nrf(form):\n \"\"\"\n Renders given form with required fields marked.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': False}\n\n\[email protected]_tag('tags/sendForm.html')\ndef show_sendform(form):\n \"\"\"\n Renders given form without marking required fields.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': True}\n\n\[email protected]_tag('tags/loginForm.html')\ndef show_loginform(form):\n \"\"\"\n Renders given form without marking required fields.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': True}\n\n\[email protected]_tag('tags/accountForm.html')\ndef show_accountform(form, is_superuser):\n \"\"\"\n Renders given form without marking required fields.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': False, 'is_superuser':\n is_superuser}\n",
"step-4": "from django import template\nregister = template.Library()\n\n\[email protected]_tag('tags/fieldsetForm.html')\ndef show_fieldsetform(form):\n \"\"\"\n Renders given form without marking required fields.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': True}\n\n\[email protected]_tag('tags/fieldsetForm.html')\ndef show_fieldsetform_nrf(form):\n \"\"\"\n Renders given form with required fields marked.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': False}\n\n\[email protected]_tag('tags/sendForm.html')\ndef show_sendform(form):\n \"\"\"\n Renders given form without marking required fields.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': True}\n\n\[email protected]_tag('tags/loginForm.html')\ndef show_loginform(form):\n \"\"\"\n Renders given form without marking required fields.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': True}\n\n\[email protected]_tag('tags/accountForm.html')\ndef show_accountform(form, is_superuser):\n \"\"\"\n Renders given form without marking required fields.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': False, 'is_superuser':\n is_superuser}\n",
"step-5": "# -*- coding: utf-8 -*-\n# @COPYRIGHT_begin\n#\n# Copyright [2015] Michał Szczygieł, M4GiK Software\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# @COPYRIGHT_end\nfrom django import template\n\n\nregister = template.Library()\n\n\[email protected]_tag('tags/fieldsetForm.html')\ndef show_fieldsetform(form):\n \"\"\"\n Renders given form without marking required fields.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': True}\n\n\[email protected]_tag('tags/fieldsetForm.html')\ndef show_fieldsetform_nrf(form):\n \"\"\"\n Renders given form with required fields marked.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': False}\n\n\[email protected]_tag('tags/sendForm.html')\ndef show_sendform(form):\n \"\"\"\n Renders given form without marking required fields.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': True}\n\n\[email protected]_tag('tags/loginForm.html')\ndef show_loginform(form):\n \"\"\"\n Renders given form without marking required fields.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': True}\n\n\[email protected]_tag('tags/accountForm.html')\ndef show_accountform(form, is_superuser):\n \"\"\"\n Renders given form without marking required fields.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': False, 'is_superuser': is_superuser}\n\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def _get_single_variable(self, name, shape=None, dtype=dtypes.float32,
initializer=None, regularizer=None, partition_info=None, reuse=None,
trainable=True, collections=None, caching_device=None, validate_shape=
True, use_resource=None):
"""Get or create a single Variable (e.g. a shard or entire variable).
See the documentation of get_variable above (ignore partitioning components)
for details.
Args:
name: see get_variable.
shape: see get_variable.
dtype: see get_variable.
initializer: see get_variable.
regularizer: see get_variable.
partition_info: _PartitionInfo object.
reuse: see get_variable.
trainable: see get_variable.
collections: see get_variable.
caching_device: see get_variable.
validate_shape: see get_variable.
use_resource: see get_variable.
Returns:
A Variable. See documentation of get_variable above.
Raises:
ValueError: See documentation of get_variable above.
"""
initializing_from_value = False
if initializer is not None and not callable(initializer):
initializing_from_value = True
if shape is not None and initializing_from_value:
raise ValueError('If initializer is a constant, do not specify shape.')
should_check = reuse is not None
dtype = dtypes.as_dtype(dtype)
shape = tensor_shape.as_shape(shape)
if name in self._vars:
if should_check and not reuse:
tb = self._vars[name].op.traceback[::-1]
tb = [x for x in tb if 'tensorflow/python' not in x[0]][:3]
raise ValueError(
"""Variable %s already exists, disallowed. Did you mean to set reuse=True in VarScope? Originally defined at:
%s"""
% (name, ''.join(traceback.format_list(tb))))
found_var = self._vars[name]
if not shape.is_compatible_with(found_var.get_shape()):
raise ValueError(
'Trying to share variable %s, but specified shape %s and found shape %s.'
% (name, shape, found_var.get_shape()))
if not dtype.is_compatible_with(found_var.dtype):
dtype_str = dtype.name
found_type_str = found_var.dtype.name
raise ValueError(
'Trying to share variable %s, but specified dtype %s and found dtype %s.'
% (name, dtype_str, found_type_str))
return found_var
if should_check and reuse:
raise ValueError(
'Variable %s does not exist, or was not created with tf.get_variable(). Did you mean to set reuse=None in VarScope?'
% name)
if not shape.is_fully_defined() and not initializing_from_value:
raise ValueError(
'Shape of a new variable (%s) must be fully defined, but instead was %s.'
% (name, shape))
if initializer is None:
initializer, initializing_from_value = self._get_default_initializer(
name=name, shape=shape, dtype=dtype)
with ops.control_dependencies(None):
if initializing_from_value:
init_val = initializer
variable_dtype = None
else:
if isinstance(initializer, type(init_ops.Initializer)):
initializer = initializer(dtype=dtype)
init_val = lambda : initializer(shape.as_list(), dtype=dtype,
partition_info=partition_info)
variable_dtype = dtype.base_dtype
if use_resource is None:
use_resource = False
if use_resource:
v = resource_variable_ops.ResourceVariable(initial_value=init_val,
name=name, trainable=trainable, collections=collections,
caching_device=caching_device, dtype=variable_dtype,
validate_shape=validate_shape)
else:
v = variables.Variable(initial_value=init_val, name=name, trainable
=trainable, collections=collections, caching_device=
caching_device, dtype=variable_dtype, validate_shape=validate_shape
)
self._vars[name] = v
logging.vlog(1, 'Created variable %s with shape %s and init %s', v.name,
format(shape), initializer)
if regularizer:
with ops.colocate_with(v.op):
with ops.name_scope(name + '/Regularizer/'):
loss = regularizer(v)
if loss is not None:
logging.vlog(1,
'Applied regularizer to %s and added the result %s to REGULARIZATION_LOSSES.'
, v.name, loss.name)
ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES, loss
)
return v
<|reserved_special_token_1|>
def _get_single_variable(self, name, shape=None, dtype=dtypes.float32, initializer=None, regularizer=None, partition_info=None, reuse=None, trainable=True, collections=None, caching_device=None, validate_shape=True, use_resource=None):
'Get or create a single Variable (e.g. a shard or entire variable).\n\n See the documentation of get_variable above (ignore partitioning components)\n for details.\n\n Args:\n name: see get_variable.\n shape: see get_variable.\n dtype: see get_variable.\n initializer: see get_variable.\n regularizer: see get_variable.\n partition_info: _PartitionInfo object.\n reuse: see get_variable.\n trainable: see get_variable.\n collections: see get_variable.\n caching_device: see get_variable.\n validate_shape: see get_variable.\n use_resource: see get_variable.\n\n Returns:\n A Variable. See documentation of get_variable above.\n\n Raises:\n ValueError: See documentation of get_variable above.\n '
initializing_from_value = False
if ((initializer is not None) and (not callable(initializer))):
initializing_from_value = True
if ((shape is not None) and initializing_from_value):
raise ValueError('If initializer is a constant, do not specify shape.')
should_check = (reuse is not None)
dtype = dtypes.as_dtype(dtype)
shape = tensor_shape.as_shape(shape)
if (name in self._vars):
if (should_check and (not reuse)):
tb = self._vars[name].op.traceback[::(- 1)]
tb = [x for x in tb if ('tensorflow/python' not in x[0])][:3]
raise ValueError(('Variable %s already exists, disallowed. Did you mean to set reuse=True in VarScope? Originally defined at:\n\n%s' % (name, ''.join(traceback.format_list(tb)))))
found_var = self._vars[name]
if (not shape.is_compatible_with(found_var.get_shape())):
raise ValueError(('Trying to share variable %s, but specified shape %s and found shape %s.' % (name, shape, found_var.get_shape())))
if (not dtype.is_compatible_with(found_var.dtype)):
dtype_str = dtype.name
found_type_str = found_var.dtype.name
raise ValueError(('Trying to share variable %s, but specified dtype %s and found dtype %s.' % (name, dtype_str, found_type_str)))
return found_var
if (should_check and reuse):
raise ValueError(('Variable %s does not exist, or was not created with tf.get_variable(). Did you mean to set reuse=None in VarScope?' % name))
if ((not shape.is_fully_defined()) and (not initializing_from_value)):
raise ValueError(('Shape of a new variable (%s) must be fully defined, but instead was %s.' % (name, shape)))
if (initializer is None):
(initializer, initializing_from_value) = self._get_default_initializer(name=name, shape=shape, dtype=dtype)
with ops.control_dependencies(None):
if initializing_from_value:
init_val = initializer
variable_dtype = None
else:
if isinstance(initializer, type(init_ops.Initializer)):
initializer = initializer(dtype=dtype)
init_val = (lambda : initializer(shape.as_list(), dtype=dtype, partition_info=partition_info))
variable_dtype = dtype.base_dtype
if (use_resource is None):
use_resource = False
if use_resource:
v = resource_variable_ops.ResourceVariable(initial_value=init_val, name=name, trainable=trainable, collections=collections, caching_device=caching_device, dtype=variable_dtype, validate_shape=validate_shape)
else:
v = variables.Variable(initial_value=init_val, name=name, trainable=trainable, collections=collections, caching_device=caching_device, dtype=variable_dtype, validate_shape=validate_shape)
self._vars[name] = v
logging.vlog(1, 'Created variable %s with shape %s and init %s', v.name, format(shape), initializer)
if regularizer:
with ops.colocate_with(v.op):
with ops.name_scope((name + '/Regularizer/')):
loss = regularizer(v)
if (loss is not None):
logging.vlog(1, 'Applied regularizer to %s and added the result %s to REGULARIZATION_LOSSES.', v.name, loss.name)
ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES, loss)
return v
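# A minimal TF 1.x-style sketch (an assumption that tensorflow.compat.v1 is
# available; not part of the function above) of the three code paths that
# _get_single_variable implements: create, share under reuse, and the
# ValueError raised when a reused variable is requested with another shape.
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

with tf.variable_scope("layer"):
    w = tf.get_variable("w", shape=[3, 3])            # name not yet in _vars -> created

with tf.variable_scope("layer", reuse=True):
    w_shared = tf.get_variable("w", shape=[3, 3])     # name found and reuse set -> shared
    assert w is w_shared

try:
    with tf.variable_scope("layer", reuse=True):
        tf.get_variable("w", shape=[5, 5])            # incompatible shape -> ValueError
except ValueError as err:
    print(err)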
|
flexible
|
{
"blob_id": "51ef1c0f6a17e12b2324a80f962b2ce47cc05bcc",
"index": 1348,
"step-1": "<mask token>\n",
"step-2": "def _get_single_variable(self, name, shape=None, dtype=dtypes.float32,\n initializer=None, regularizer=None, partition_info=None, reuse=None,\n trainable=True, collections=None, caching_device=None, validate_shape=\n True, use_resource=None):\n \"\"\"Get or create a single Variable (e.g. a shard or entire variable).\n\n See the documentation of get_variable above (ignore partitioning components)\n for details.\n\n Args:\n name: see get_variable.\n shape: see get_variable.\n dtype: see get_variable.\n initializer: see get_variable.\n regularizer: see get_variable.\n partition_info: _PartitionInfo object.\n reuse: see get_variable.\n trainable: see get_variable.\n collections: see get_variable.\n caching_device: see get_variable.\n validate_shape: see get_variable.\n use_resource: see get_variable.\n\n Returns:\n A Variable. See documentation of get_variable above.\n\n Raises:\n ValueError: See documentation of get_variable above.\n \"\"\"\n initializing_from_value = False\n if initializer is not None and not callable(initializer):\n initializing_from_value = True\n if shape is not None and initializing_from_value:\n raise ValueError('If initializer is a constant, do not specify shape.')\n should_check = reuse is not None\n dtype = dtypes.as_dtype(dtype)\n shape = tensor_shape.as_shape(shape)\n if name in self._vars:\n if should_check and not reuse:\n tb = self._vars[name].op.traceback[::-1]\n tb = [x for x in tb if 'tensorflow/python' not in x[0]][:3]\n raise ValueError(\n \"\"\"Variable %s already exists, disallowed. Did you mean to set reuse=True in VarScope? Originally defined at:\n\n%s\"\"\"\n % (name, ''.join(traceback.format_list(tb))))\n found_var = self._vars[name]\n if not shape.is_compatible_with(found_var.get_shape()):\n raise ValueError(\n 'Trying to share variable %s, but specified shape %s and found shape %s.'\n % (name, shape, found_var.get_shape()))\n if not dtype.is_compatible_with(found_var.dtype):\n dtype_str = dtype.name\n found_type_str = found_var.dtype.name\n raise ValueError(\n 'Trying to share variable %s, but specified dtype %s and found dtype %s.'\n % (name, dtype_str, found_type_str))\n return found_var\n if should_check and reuse:\n raise ValueError(\n 'Variable %s does not exist, or was not created with tf.get_variable(). 
Did you mean to set reuse=None in VarScope?'\n % name)\n if not shape.is_fully_defined() and not initializing_from_value:\n raise ValueError(\n 'Shape of a new variable (%s) must be fully defined, but instead was %s.'\n % (name, shape))\n if initializer is None:\n initializer, initializing_from_value = self._get_default_initializer(\n name=name, shape=shape, dtype=dtype)\n with ops.control_dependencies(None):\n if initializing_from_value:\n init_val = initializer\n variable_dtype = None\n else:\n if isinstance(initializer, type(init_ops.Initializer)):\n initializer = initializer(dtype=dtype)\n init_val = lambda : initializer(shape.as_list(), dtype=dtype,\n partition_info=partition_info)\n variable_dtype = dtype.base_dtype\n if use_resource is None:\n use_resource = False\n if use_resource:\n v = resource_variable_ops.ResourceVariable(initial_value=init_val,\n name=name, trainable=trainable, collections=collections,\n caching_device=caching_device, dtype=variable_dtype,\n validate_shape=validate_shape)\n else:\n v = variables.Variable(initial_value=init_val, name=name, trainable\n =trainable, collections=collections, caching_device=\n caching_device, dtype=variable_dtype, validate_shape=validate_shape\n )\n self._vars[name] = v\n logging.vlog(1, 'Created variable %s with shape %s and init %s', v.name,\n format(shape), initializer)\n if regularizer:\n with ops.colocate_with(v.op):\n with ops.name_scope(name + '/Regularizer/'):\n loss = regularizer(v)\n if loss is not None:\n logging.vlog(1,\n 'Applied regularizer to %s and added the result %s to REGULARIZATION_LOSSES.'\n , v.name, loss.name)\n ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES, loss\n )\n return v\n",
"step-3": "def _get_single_variable(self, name, shape=None, dtype=dtypes.float32, initializer=None, regularizer=None, partition_info=None, reuse=None, trainable=True, collections=None, caching_device=None, validate_shape=True, use_resource=None):\n 'Get or create a single Variable (e.g. a shard or entire variable).\\n\\n See the documentation of get_variable above (ignore partitioning components)\\n for details.\\n\\n Args:\\n name: see get_variable.\\n shape: see get_variable.\\n dtype: see get_variable.\\n initializer: see get_variable.\\n regularizer: see get_variable.\\n partition_info: _PartitionInfo object.\\n reuse: see get_variable.\\n trainable: see get_variable.\\n collections: see get_variable.\\n caching_device: see get_variable.\\n validate_shape: see get_variable.\\n use_resource: see get_variable.\\n\\n Returns:\\n A Variable. See documentation of get_variable above.\\n\\n Raises:\\n ValueError: See documentation of get_variable above.\\n '\n initializing_from_value = False\n if ((initializer is not None) and (not callable(initializer))):\n initializing_from_value = True\n if ((shape is not None) and initializing_from_value):\n raise ValueError('If initializer is a constant, do not specify shape.')\n should_check = (reuse is not None)\n dtype = dtypes.as_dtype(dtype)\n shape = tensor_shape.as_shape(shape)\n if (name in self._vars):\n if (should_check and (not reuse)):\n tb = self._vars[name].op.traceback[::(- 1)]\n tb = [x for x in tb if ('tensorflow/python' not in x[0])][:3]\n raise ValueError(('Variable %s already exists, disallowed. Did you mean to set reuse=True in VarScope? Originally defined at:\\n\\n%s' % (name, ''.join(traceback.format_list(tb)))))\n found_var = self._vars[name]\n if (not shape.is_compatible_with(found_var.get_shape())):\n raise ValueError(('Trying to share variable %s, but specified shape %s and found shape %s.' % (name, shape, found_var.get_shape())))\n if (not dtype.is_compatible_with(found_var.dtype)):\n dtype_str = dtype.name\n found_type_str = found_var.dtype.name\n raise ValueError(('Trying to share variable %s, but specified dtype %s and found dtype %s.' % (name, dtype_str, found_type_str)))\n return found_var\n if (should_check and reuse):\n raise ValueError(('Variable %s does not exist, or was not created with tf.get_variable(). Did you mean to set reuse=None in VarScope?' % name))\n if ((not shape.is_fully_defined()) and (not initializing_from_value)):\n raise ValueError(('Shape of a new variable (%s) must be fully defined, but instead was %s.' 
% (name, shape)))\n if (initializer is None):\n (initializer, initializing_from_value) = self._get_default_initializer(name=name, shape=shape, dtype=dtype)\n with ops.control_dependencies(None):\n if initializing_from_value:\n init_val = initializer\n variable_dtype = None\n else:\n if isinstance(initializer, type(init_ops.Initializer)):\n initializer = initializer(dtype=dtype)\n init_val = (lambda : initializer(shape.as_list(), dtype=dtype, partition_info=partition_info))\n variable_dtype = dtype.base_dtype\n if (use_resource is None):\n use_resource = False\n if use_resource:\n v = resource_variable_ops.ResourceVariable(initial_value=init_val, name=name, trainable=trainable, collections=collections, caching_device=caching_device, dtype=variable_dtype, validate_shape=validate_shape)\n else:\n v = variables.Variable(initial_value=init_val, name=name, trainable=trainable, collections=collections, caching_device=caching_device, dtype=variable_dtype, validate_shape=validate_shape)\n self._vars[name] = v\n logging.vlog(1, 'Created variable %s with shape %s and init %s', v.name, format(shape), initializer)\n if regularizer:\n with ops.colocate_with(v.op):\n with ops.name_scope((name + '/Regularizer/')):\n loss = regularizer(v)\n if (loss is not None):\n logging.vlog(1, 'Applied regularizer to %s and added the result %s to REGULARIZATION_LOSSES.', v.name, loss.name)\n ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES, loss)\n return v",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from ethereum.abi import (
decode_abi,
normalize_name as normalize_abi_method_name,
method_id as get_abi_method_id)
from ethereum.utils import encode_int, zpad, decode_hex
import json
import time
from web3 import Web3, HTTPProvider, TestRPCProvider
from solc import compile_source
from web3.contract import ConciseContract
import sys
import os
Cpath = os.path.dirname(os.path.realpath(__file__))
host = 'localhost'
TID = sys.argv[1]
# web3.py instance
w3 = Web3(HTTPProvider('http://'+host+':3000'))
with open(Cpath+'/abi','r') as f:
    line = f.readline()
Jline = json.loads(line)
abi = Jline
Transaction = w3.eth.getTransaction(TID)
#print(Transaction.input)
def decode_contract_call(contract_abi: list, call_data: str):
call_data_bin = decode_hex(call_data)
method_signature = call_data_bin[:4]
for description in contract_abi:
if description.get('type') != 'function':
continue
method_name = normalize_abi_method_name(description['name'])
arg_types = [item['type'] for item in description['inputs']]
method_id = get_abi_method_id(method_name, arg_types)
if zpad(encode_int(method_id), 4) == method_signature:
try:
args = decode_abi(arg_types, call_data_bin[4:])
except AssertionError:
# Invalid args
continue
return method_name, args
result = decode_contract_call(abi,Transaction.input)
#result = decode_contract_call(abi,"0xa9059cbb0000000000000000000000006cd5d27785e38b28a0d9656bcc795d90a4d670c500000000000000000000000000000000000000000000000000000000000001f4")
print(result)
print(Transaction['from'])
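# Sketch of where the 4-byte method_signature compared above comes from: the
# first four bytes of the Keccak-256 hash of the canonical signature string.
# Assumes a web3.py version exposing Web3.keccak; the example signature is the
# ERC-20 transfer call used in the commented-out test input above.
def selector_for(signature: str) -> bytes:
    return bytes(Web3.keccak(text=signature))[:4]

print(selector_for("transfer(address,uint256)").hex())  # a9059cbb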
|
normal
|
{
"blob_id": "6437cb90ebaed7cf59df780062ebccf77fcef084",
"index": 4123,
"step-1": "<mask token>\n\n\ndef decode_contract_call(contract_abi: list, call_data: str):\n call_data_bin = decode_hex(call_data)\n method_signature = call_data_bin[:4]\n for description in contract_abi:\n if description.get('type') != 'function':\n continue\n method_name = normalize_abi_method_name(description['name'])\n arg_types = [item['type'] for item in description['inputs']]\n method_id = get_abi_method_id(method_name, arg_types)\n if zpad(encode_int(method_id), 4) == method_signature:\n try:\n args = decode_abi(arg_types, call_data_bin[4:])\n except AssertionError:\n continue\n return method_name, args\n\n\n<mask token>\n",
"step-2": "<mask token>\nf.close()\n<mask token>\n\n\ndef decode_contract_call(contract_abi: list, call_data: str):\n call_data_bin = decode_hex(call_data)\n method_signature = call_data_bin[:4]\n for description in contract_abi:\n if description.get('type') != 'function':\n continue\n method_name = normalize_abi_method_name(description['name'])\n arg_types = [item['type'] for item in description['inputs']]\n method_id = get_abi_method_id(method_name, arg_types)\n if zpad(encode_int(method_id), 4) == method_signature:\n try:\n args = decode_abi(arg_types, call_data_bin[4:])\n except AssertionError:\n continue\n return method_name, args\n\n\n<mask token>\nprint(result)\nprint(Transaction['from'])\n",
"step-3": "<mask token>\nCpath = os.path.dirname(os.path.realpath(__file__))\nhost = 'localhost'\nTID = sys.argv[1]\nw3 = Web3(HTTPProvider('http://' + host + ':3000'))\nf = open(Cpath + '/abi', 'r')\nline = f.readline()\nJline = json.loads(line)\nf.close()\nabi = Jline\nTransaction = w3.eth.getTransaction(TID)\n\n\ndef decode_contract_call(contract_abi: list, call_data: str):\n call_data_bin = decode_hex(call_data)\n method_signature = call_data_bin[:4]\n for description in contract_abi:\n if description.get('type') != 'function':\n continue\n method_name = normalize_abi_method_name(description['name'])\n arg_types = [item['type'] for item in description['inputs']]\n method_id = get_abi_method_id(method_name, arg_types)\n if zpad(encode_int(method_id), 4) == method_signature:\n try:\n args = decode_abi(arg_types, call_data_bin[4:])\n except AssertionError:\n continue\n return method_name, args\n\n\nresult = decode_contract_call(abi, Transaction.input)\nprint(result)\nprint(Transaction['from'])\n",
"step-4": "from ethereum.abi import decode_abi, normalize_name as normalize_abi_method_name, method_id as get_abi_method_id\nfrom ethereum.utils import encode_int, zpad, decode_hex\nimport json\nimport time\nfrom web3 import Web3, HTTPProvider, TestRPCProvider\nfrom solc import compile_source\nfrom web3.contract import ConciseContract\nimport sys\nimport os\nCpath = os.path.dirname(os.path.realpath(__file__))\nhost = 'localhost'\nTID = sys.argv[1]\nw3 = Web3(HTTPProvider('http://' + host + ':3000'))\nf = open(Cpath + '/abi', 'r')\nline = f.readline()\nJline = json.loads(line)\nf.close()\nabi = Jline\nTransaction = w3.eth.getTransaction(TID)\n\n\ndef decode_contract_call(contract_abi: list, call_data: str):\n call_data_bin = decode_hex(call_data)\n method_signature = call_data_bin[:4]\n for description in contract_abi:\n if description.get('type') != 'function':\n continue\n method_name = normalize_abi_method_name(description['name'])\n arg_types = [item['type'] for item in description['inputs']]\n method_id = get_abi_method_id(method_name, arg_types)\n if zpad(encode_int(method_id), 4) == method_signature:\n try:\n args = decode_abi(arg_types, call_data_bin[4:])\n except AssertionError:\n continue\n return method_name, args\n\n\nresult = decode_contract_call(abi, Transaction.input)\nprint(result)\nprint(Transaction['from'])\n",
"step-5": "from ethereum.abi import (\n decode_abi,\n normalize_name as normalize_abi_method_name,\n method_id as get_abi_method_id)\nfrom ethereum.utils import encode_int, zpad, decode_hex\n\nimport json\nimport time\nfrom web3 import Web3, HTTPProvider, TestRPCProvider\nfrom solc import compile_source\nfrom web3.contract import ConciseContract\nimport sys\nimport os\nCpath = os.path.dirname(os.path.realpath(__file__))\n\nhost = 'localhost'\nTID = sys.argv[1]\n\n# web3.py instance\nw3 = Web3(HTTPProvider('http://'+host+':3000'))\nf = open(Cpath+'/abi','r')\nline = f.readline()\nJline = json.loads(line)\nf.close()\n\nabi = Jline\n\nTransaction = w3.eth.getTransaction(TID)\n#print(Transaction.input)\n\n\ndef decode_contract_call(contract_abi: list, call_data: str):\n call_data_bin = decode_hex(call_data)\n method_signature = call_data_bin[:4]\n for description in contract_abi:\n if description.get('type') != 'function':\n continue\n method_name = normalize_abi_method_name(description['name'])\n arg_types = [item['type'] for item in description['inputs']]\n method_id = get_abi_method_id(method_name, arg_types)\n if zpad(encode_int(method_id), 4) == method_signature:\n try:\n args = decode_abi(arg_types, call_data_bin[4:])\n except AssertionError:\n # Invalid args\n continue\n return method_name, args\n\nresult = decode_contract_call(abi,Transaction.input)\n#result = decode_contract_call(abi,\"0xa9059cbb0000000000000000000000006cd5d27785e38b28a0d9656bcc795d90a4d670c500000000000000000000000000000000000000000000000000000000000001f4\")\nprint(result)\nprint(Transaction['from'])\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import cv2 #imports cv2 package
import numpy as np #imports numpy package
import matplotlib.pyplot as plt #imports matplotlib.pyplot package
img_noblur = cv2.imread('road8.jpg') #reads the image
imgnew = img_noblur.copy() #creates a copy of the image
img_noblur_grey = cv2.cvtColor(img_noblur, cv2.COLOR_BGR2GRAY) #converts the image from BGR to Grayscale
img = cv2.GaussianBlur(img_noblur_grey,(5,5),0) #applies a Gaussian Blur to the image for smoothing
sobelx = cv2.Sobel(img,-1,1,0,ksize=3) #applies Sobel horizontal kernel of size 3 to the image
sobelx[sobelx<100] = 0 #discards low intensity pixels
lines = cv2.HoughLinesP(sobelx,1,np.pi/180,100) #use HoughLinesP to detect lines in the image to which Sobel horizontal kernel was applied
for x in range(0, len(lines)):
for x1,y1,x2,y2 in lines[x]:
cv2.line(imgnew,(x1,y1),(x2,y2),(0,255,0),5) #draws the detected lines on the image
imgnew = cv2.cvtColor(imgnew, cv2.COLOR_BGR2RGB) #converts the image from BGR to RGB
img_noblur = cv2.cvtColor(img_noblur, cv2.COLOR_BGR2RGB) #converts the original image from BGR to RGB for display
plt.subplot(131),plt.imshow(img_noblur,cmap = 'gray') #plots the original image
plt.title('Original Image'), plt.xticks([]), plt.yticks([])
plt.subplot(132),plt.imshow(sobelx,cmap = 'gray') #plots the result of applying Sobel horizontal kernel to the image
plt.title('Sobel'), plt.xticks([]), plt.yticks([])
plt.subplot(133),plt.imshow(imgnew,cmap = 'gray') #plots the result with the road markers detected
plt.title('Output'), plt.xticks([]), plt.yticks([])
plt.show() #displays the figure
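# Optional refinement (illustrative values only, reusing sobelx from above):
# cv2.HoughLinesP also accepts minLineLength and maxLineGap, which drop short
# segments and bridge small gaps into a single detected line.
# lines = cv2.HoughLinesP(sobelx, rho=1, theta=np.pi/180, threshold=100,
#                         minLineLength=50, maxLineGap=10)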
|
normal
|
{
"blob_id": "7b4f46f6c286a7d0ef45079b2fd238b81d5f89eb",
"index": 3493,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor x in range(0, len(lines)):\n for x1, y1, x2, y2 in lines[x]:\n cv2.line(imgnew, (x1, y1), (x2, y2), (0, 255, 0), 5)\n<mask token>\nplt.subplot(131), plt.imshow(img_noblur, cmap='gray')\nplt.title('Original Image'), plt.xticks([]), plt.yticks([])\nplt.subplot(132), plt.imshow(sobelx, cmap='gray')\nplt.title('Sobel'), plt.xticks([]), plt.yticks([])\nplt.subplot(133), plt.imshow(imgnew, cmap='gray')\nplt.title('Output'), plt.xticks([]), plt.yticks([])\nplt.show()\n",
"step-3": "<mask token>\nimg_noblur = cv2.imread('road8.jpg')\nimgnew = img_noblur.copy()\nimg_noblur_grey = cv2.cvtColor(img_noblur, cv2.COLOR_BGR2GRAY)\nimg = cv2.GaussianBlur(img_noblur_grey, (5, 5), 0)\nsobelx = cv2.Sobel(img, -1, 1, 0, ksize=3)\nsobelx[sobelx < 100] = 0\nlines = cv2.HoughLinesP(sobelx, 1, np.pi / 180, 100)\nfor x in range(0, len(lines)):\n for x1, y1, x2, y2 in lines[x]:\n cv2.line(imgnew, (x1, y1), (x2, y2), (0, 255, 0), 5)\nimgnew = cv2.cvtColor(imgnew, cv2.COLOR_BGR2RGB)\nimg_noblur = cv2.cvtColor(img_noblur, cv2.COLOR_BGR2RGB)\nplt.subplot(131), plt.imshow(img_noblur, cmap='gray')\nplt.title('Original Image'), plt.xticks([]), plt.yticks([])\nplt.subplot(132), plt.imshow(sobelx, cmap='gray')\nplt.title('Sobel'), plt.xticks([]), plt.yticks([])\nplt.subplot(133), plt.imshow(imgnew, cmap='gray')\nplt.title('Output'), plt.xticks([]), plt.yticks([])\nplt.show()\n",
"step-4": "import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimg_noblur = cv2.imread('road8.jpg')\nimgnew = img_noblur.copy()\nimg_noblur_grey = cv2.cvtColor(img_noblur, cv2.COLOR_BGR2GRAY)\nimg = cv2.GaussianBlur(img_noblur_grey, (5, 5), 0)\nsobelx = cv2.Sobel(img, -1, 1, 0, ksize=3)\nsobelx[sobelx < 100] = 0\nlines = cv2.HoughLinesP(sobelx, 1, np.pi / 180, 100)\nfor x in range(0, len(lines)):\n for x1, y1, x2, y2 in lines[x]:\n cv2.line(imgnew, (x1, y1), (x2, y2), (0, 255, 0), 5)\nimgnew = cv2.cvtColor(imgnew, cv2.COLOR_BGR2RGB)\nimg_noblur = cv2.cvtColor(img_noblur, cv2.COLOR_BGR2RGB)\nplt.subplot(131), plt.imshow(img_noblur, cmap='gray')\nplt.title('Original Image'), plt.xticks([]), plt.yticks([])\nplt.subplot(132), plt.imshow(sobelx, cmap='gray')\nplt.title('Sobel'), plt.xticks([]), plt.yticks([])\nplt.subplot(133), plt.imshow(imgnew, cmap='gray')\nplt.title('Output'), plt.xticks([]), plt.yticks([])\nplt.show()\n",
"step-5": "import cv2\t\t\t\t\t\t\t\t\t\t#imports cv2 package\nimport numpy as np \t\t\t\t\t\t\t\t#imports numpy package\nimport matplotlib.pyplot as plt \t\t\t\t#imports matplotlib.pyplot package\n\nimg_noblur = cv2.imread('road8.jpg')\t\t\t#reads the image\nimgnew = img_noblur.copy()\t\t\t\t\t\t#creates a copy of the image\nimg_noblur_grey = cv2.cvtColor(img_noblur, cv2.COLOR_BGR2GRAY)\t#converts the image from BGR to Grayscale\nimg = cv2.GaussianBlur(img_noblur_grey,(5,5),0)\t#applies a Gaussian Blur to the image for smoothing\n\nsobelx = cv2.Sobel(img,-1,1,0,ksize=3)\t\t\t#applies Sobel horizontal kernel of size 3 to the image\nsobelx[sobelx<100] = 0\t\t\t\t\t\t\t#discards low intensity pixels\n\nlines = cv2.HoughLinesP(sobelx,1,np.pi/180,100)\t#use HoughLinesP to detect lines in the image to which Sobel horizontal kernel was applied\nfor x in range(0, len(lines)):\n for x1,y1,x2,y2 in lines[x]:\n cv2.line(imgnew,(x1,y1),(x2,y2),(0,255,0),5)\t\t#draws the detected lines on the image\n\nimgnew = cv2.cvtColor(imgnew, cv2.COLOR_BGR2RGB)\t\t\t#converts the image from BGR to RGB\nimg_noblur = cv2.cvtColor(img_noblur, cv2.COLOR_BGR2RGB)\t#converts the original image from BGR to RGB for display\n\nplt.subplot(131),plt.imshow(img_noblur,cmap = 'gray')\t\t#plots the original image\nplt.title('Original Image'), plt.xticks([]), plt.yticks([])\nplt.subplot(132),plt.imshow(sobelx,cmap = 'gray')\t\t\t#plots the result of applying Sobel horizontal kernel to the image\nplt.title('Sobel'), plt.xticks([]), plt.yticks([])\nplt.subplot(133),plt.imshow(imgnew,cmap = 'gray')\t\t\t#plots the result with the road markers detected\nplt.title('Output'), plt.xticks([]), plt.yticks([])\n\nplt.show()\t\t\t\t\t\t\t\t\t\t\t#displays the figure",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#Rules used for pattern matching
#1. x='[abc]' matches either a, b or c
#eg:
# import re
# x="[abc]"
# matcher=re.finditer(x,"abt cq5kz")
# for match in matcher:
# print(match.start())
# print(match.group())
#2. x='[^abc]' matches any character except a, b or c
#eg:
# import re
# x="[^abc]"
# matcher=re.finditer(x,"abt cq5kz")
# for match in matcher:
# print(match.start())
# print(match.group())
#3. x='[a-z]' matches a to z; a leading ^ inside the brackets negates the class
#eg
# import re
# x="[a-z]"
# matcher=re.finditer(x,"abt cq5kz")
# for match in matcher:
# print(match.start())
# print(match.group())
#eg with ^
# import re
# x="[^a-z]"
# matcher=re.finditer(x,"abt cq5kz")
# for match in matcher:
# print(match.start())
# print(match.group())
#4. x='[A-Z]' A TO Z
# import re
# x="[A-Z]"
# matcher=re.finditer(x,"abt SC5kZ")
# for match in matcher:
# print(match.start())
# print(match.group())
#5. x="[a-zA-Z]" matches both lowercase and uppercase letters
import re
x="[a-zA-Z]"
matcher=re.finditer(x,"abtABIkz")
for match in matcher:
print(match.start())
print(match.group())
#6. x="[0-9]" matches digits 0 to 9
# import re
# x="[0-9]"
# matcher=re.finditer(x,"ab1z7")
# for match in matcher:
# print(match.start())
# print(match.group())
#7. x="[a-zA-Z0-9]" matches letters and digits
# import re
# x="[a-zA-Z0-9]"
# matcher=re.finditer(x,"ab72ABIkz")
# for match in matcher:
# print(match.start())
# print(match.group())
#8. x='\s' matches whitespace characters
# import re
# x="\s"
# matcher=re.finditer(x,"ab tAB Ikz")
# for match in matcher:
# print(match.start())
# print(match.group())
#9. x='\d' matches digits
# import re
# x="\d"
# matcher=re.finditer(x,"ab7tAB12kz")
# for match in matcher:
# print(match.start())
# print(match.group())
#10. x='\D' matches any character except digits
# import re
# x="\D"
# matcher=re.finditer(x,"ab001tAB5236Ikz")
# for match in matcher:
# print(match.start())
# print(match.group())
#11. x='\w' matches word characters (letters, digits, underscore), i.e. everything except special characters
# import re
# x="\w"
# matcher=re.finditer(x,"ab %tAB @Ikz")
# for match in matcher:
# print(match.start())
# print(match.group())
#12. x='\W' matches special (non-word) characters
# import re
# x="\W"
# matcher=re.finditer(x,"ab!!tAB@Ikz")
# for match in matcher:
# print(match.start())
# print(match.group())
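#13. re.compile: when the same pattern is reused many times, it can be
# precompiled once and its finditer() method called directly (equivalent to
# the re.finditer(x, ...) calls above)
# import re
# pattern = re.compile("[a-zA-Z0-9]")
# matcher = pattern.finditer("ab72ABIkz")
# for match in matcher:
#     print(match.start())
#     print(match.group())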
|
normal
|
{
"blob_id": "1ddc261cf174c109583fd0ead1f537673d29090a",
"index": 1433,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor match in matcher:\n print(match.start())\n print(match.group())\n",
"step-3": "<mask token>\nx = '[a-zA-Z]'\nmatcher = re.finditer(x, 'abtABIkz')\nfor match in matcher:\n print(match.start())\n print(match.group())\n",
"step-4": "import re\nx = '[a-zA-Z]'\nmatcher = re.finditer(x, 'abtABIkz')\nfor match in matcher:\n print(match.start())\n print(match.group())\n",
"step-5": " #rules used for pattern matching\n # #1. x='[abc]' either a,b or c\n#eg:\n# import re\n# x=\"[abc]\"\n# matcher=re.finditer(x,\"abt cq5kz\")\n# for match in matcher:\n# print(match.start())\n# print(match.group())\n\n#2. x='[^abc]' except abc\n#eg:\n# import re\n# x=\"[^abc]\"\n# matcher=re.finditer(x,\"abt cq5kz\")\n# for match in matcher:\n# print(match.start())\n# print(match.group())\n\n#3. x='[a-z]' a to z ^ cap means that is not included\n#eg\n# import re\n# x=\"[a-z]\"\n# matcher=re.finditer(x,\"abt cq5kz\")\n# for match in matcher:\n# print(match.start())\n# print(match.group())\n\n#eg with ^\n# import re\n# x=\"[^a-z]\"\n# matcher=re.finditer(x,\"abt cq5kz\")\n# for match in matcher:\n# print(match.start())\n# print(match.group())\n\n#4. x='[A-Z]' A TO Z\n# import re\n# x=\"[A-Z]\"\n# matcher=re.finditer(x,\"abt SC5kZ\")\n# for match in matcher:\n# print(match.start())\n# print(match.group())\n\n#5.X=\"[a-zA-Z]\" BOTH LOWER AND UPPERCASE ARE CHECKED\nimport re\nx=\"[a-zA-Z]\"\nmatcher=re.finditer(x,\"abtABIkz\")\nfor match in matcher:\n print(match.start())\n print(match.group())\n\n#6. X=\"[0-9]\"\n# import re\n# x=\"[0-9]\"\n# matcher=re.finditer(x,\"ab1z7\")\n# for match in matcher:\n# print(match.start())\n# print(match.group())\n\n#7.x=\"[a-zA-Z0-9]\"\n# import re\n# x=\"[a-zA-Z0-9]\"\n# matcher=re.finditer(x,\"ab72ABIkz\")\n# for match in matcher:\n# print(match.start())\n# print(match.group())\n\n#8.x='\\s' check space\n# import re\n# x=\"\\s\"\n# matcher=re.finditer(x,\"ab tAB Ikz\")\n# for match in matcher:\n# print(match.start())\n# print(match.group())\n\n#9.x='\\d' check the digits\n# import re\n# x=\"\\d\"\n# matcher=re.finditer(x,\"ab7tAB12kz\")\n# for match in matcher:\n# print(match.start())\n# print(match.group())\n\n#9. x='\\D' except digits\n# import re\n# x=\"\\D\"\n# matcher=re.finditer(x,\"ab001tAB5236Ikz\")\n# for match in matcher:\n# print(match.start())\n# print(match.group())\n\n#10. x='\\w' all words except special characters\n# import re\n# x=\"\\w\"\n# matcher=re.finditer(x,\"ab %tAB @Ikz\")\n# for match in matcher:\n# print(match.start())\n# print(match.group())\n\n\n#11.x='\\W' for special characters\n# import re\n# x=\"\\W\"\n# matcher=re.finditer(x,\"ab!!tAB@Ikz\")\n# for match in matcher:\n# print(match.start())\n# print(match.group())\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.shortcuts import render, redirect
from django.contrib import messages
from .models import *
from django.views.decorators.csrf import csrf_exempt
def index(request):
notes = Note.objects.all().order_by('-created_at')
context = {
"notes" : notes
}
return render(request, 'notes/index.html', context)
def add(request):
if request.method == 'POST':
errors = Note.objects.validate(request.POST)
if errors:
for error in errors:
messages.error(request, error)
return redirect('/')
else:
Note.objects.create(title=request.POST['title'], description=request.POST['description'])
context = {
"notes": Note.objects.all().order_by('-created_at')
}
return render(request, 'notes/notes_index.html', context)
@csrf_exempt
def delete(request, id):
if request.method == 'POST':
note = Note.objects.get(id=id)
note.delete()
context = {
"notes": Note.objects.all().order_by('-created_at')
}
return render(request, 'notes/notes_index.html', context)
def edit(request, id):
if request.method == "POST":
note = Note.objects.get(id=id)
note.description = request.POST['edit_description']
note.save()
context = {
"notes": Note.objects.all().order_by('-created_at')
}
return render(request, 'notes/notes_index.html', context)
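# These views assume a Note model with title, description and created_at
# fields and a custom manager providing Note.objects.validate(). The real
# models.py is not shown here; the sketch below (field sizes, error messages)
# is an assumption, kept as comments so it does not shadow the imported model.
#
# from django.db import models
#
# class NoteManager(models.Manager):
#     def validate(self, postData):
#         errors = []
#         if len(postData['title']) < 1:
#             errors.append('Title is required')
#         if len(postData['description']) < 1:
#             errors.append('Description is required')
#         return errors
#
# class Note(models.Model):
#     title = models.CharField(max_length=255)
#     description = models.TextField()
#     created_at = models.DateTimeField(auto_now_add=True)
#     objects = NoteManager()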
|
normal
|
{
"blob_id": "e983db4b99e73929c02eb84fab1ee56138048052",
"index": 8221,
"step-1": "<mask token>\n\n\ndef index(request):\n notes = Note.objects.all().order_by('-created_at')\n context = {'notes': notes}\n return render(request, 'notes/index.html', context)\n\n\ndef add(request):\n if request.method == 'POST':\n errors = Note.objects.validate(request.POST)\n if errors:\n for error in errors:\n messages.error(request, error)\n return redirect('/')\n else:\n Note.objects.create(title=request.POST['title'], description=\n request.POST['description'])\n context = {'notes': Note.objects.all().order_by('-created_at')}\n return render(request, 'notes/notes_index.html', context)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef index(request):\n notes = Note.objects.all().order_by('-created_at')\n context = {'notes': notes}\n return render(request, 'notes/index.html', context)\n\n\ndef add(request):\n if request.method == 'POST':\n errors = Note.objects.validate(request.POST)\n if errors:\n for error in errors:\n messages.error(request, error)\n return redirect('/')\n else:\n Note.objects.create(title=request.POST['title'], description=\n request.POST['description'])\n context = {'notes': Note.objects.all().order_by('-created_at')}\n return render(request, 'notes/notes_index.html', context)\n\n\n<mask token>\n\n\ndef edit(request, id):\n if request.method == 'POST':\n note = Note.objects.get(id=id)\n note.description = request.POST['edit_description']\n note.save()\n context = {'notes': Note.objects.all().order_by('-created_at')}\n return render(request, 'notes/notes_index.html', context)\n",
"step-3": "<mask token>\n\n\ndef index(request):\n notes = Note.objects.all().order_by('-created_at')\n context = {'notes': notes}\n return render(request, 'notes/index.html', context)\n\n\ndef add(request):\n if request.method == 'POST':\n errors = Note.objects.validate(request.POST)\n if errors:\n for error in errors:\n messages.error(request, error)\n return redirect('/')\n else:\n Note.objects.create(title=request.POST['title'], description=\n request.POST['description'])\n context = {'notes': Note.objects.all().order_by('-created_at')}\n return render(request, 'notes/notes_index.html', context)\n\n\n@csrf_exempt\ndef delete(request, id):\n if request.method == 'POST':\n note = Note.objects.get(id=id)\n note.delete()\n context = {'notes': Note.objects.all().order_by('-created_at')}\n return render(request, 'notes/notes_index.html', context)\n\n\ndef edit(request, id):\n if request.method == 'POST':\n note = Note.objects.get(id=id)\n note.description = request.POST['edit_description']\n note.save()\n context = {'notes': Note.objects.all().order_by('-created_at')}\n return render(request, 'notes/notes_index.html', context)\n",
"step-4": "from django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom .models import *\nfrom django.views.decorators.csrf import csrf_exempt\n\n\ndef index(request):\n notes = Note.objects.all().order_by('-created_at')\n context = {'notes': notes}\n return render(request, 'notes/index.html', context)\n\n\ndef add(request):\n if request.method == 'POST':\n errors = Note.objects.validate(request.POST)\n if errors:\n for error in errors:\n messages.error(request, error)\n return redirect('/')\n else:\n Note.objects.create(title=request.POST['title'], description=\n request.POST['description'])\n context = {'notes': Note.objects.all().order_by('-created_at')}\n return render(request, 'notes/notes_index.html', context)\n\n\n@csrf_exempt\ndef delete(request, id):\n if request.method == 'POST':\n note = Note.objects.get(id=id)\n note.delete()\n context = {'notes': Note.objects.all().order_by('-created_at')}\n return render(request, 'notes/notes_index.html', context)\n\n\ndef edit(request, id):\n if request.method == 'POST':\n note = Note.objects.get(id=id)\n note.description = request.POST['edit_description']\n note.save()\n context = {'notes': Note.objects.all().order_by('-created_at')}\n return render(request, 'notes/notes_index.html', context)\n",
"step-5": "from django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom .models import *\nfrom django.views.decorators.csrf import csrf_exempt\n\ndef index(request):\n notes = Note.objects.all().order_by('-created_at')\n context = {\n \"notes\" : notes\n }\n return render(request, 'notes/index.html', context)\n\n\ndef add(request):\n if request.method == 'POST':\n errors = Note.objects.validate(request.POST)\n if errors:\n for error in errors:\n messages.error(request, error)\n return redirect('/')\n else:\n Note.objects.create(title=request.POST['title'], description=request.POST['description'])\n \n context = {\n \"notes\": Note.objects.all().order_by('-created_at')\n }\n return render(request, 'notes/notes_index.html', context)\n \n@csrf_exempt\ndef delete(request, id):\n if request.method == 'POST':\n note = Note.objects.get(id=id)\n note.delete()\n context = {\n \"notes\": Note.objects.all().order_by('-created_at')\n }\n return render(request, 'notes/notes_index.html', context)\n\ndef edit(request, id):\n if request.method == \"POST\":\n note = Note.objects.get(id=id)\n note.description = request.POST['edit_description']\n note.save()\n context = {\n \"notes\": Note.objects.all().order_by('-created_at')\n }\n return render(request, 'notes/notes_index.html', context)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
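Aside: the delete and edit views in the row above expect an id captured from the URL; a minimal, hypothetical urls.py wiring (module path and route names are assumptions, not taken from the source) might look like this.

# notes/urls.py -- hypothetical wiring for the views shown above
from django.urls import path
from . import views

urlpatterns = [
    path('', views.index, name='index'),
    path('add', views.add, name='add'),
    path('delete/<int:id>', views.delete, name='delete'),
    path('edit/<int:id>', views.edit, name='edit'),
]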
<|reserved_special_token_0|>
class QuantModelMetricsResource(MetricsResource):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class MlModelMetricsResource(MetricsResource):
"""
This resource handles the HTTP requests coming to the endpoint
"/ml_model/metrics/{metric_id}".
This subclass uses almost everything from the base class, it only needs to specify the
appropriate schemas in the constructor, and to override the build_query method so that
the appropriate metric_type is filtered and the remaining query parameters (specific
to this endpoint) are processed.
Implemented Query Parameters:
- algorithm: to filter results by a given algorithm.
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, POST
"""
def __init__(self, **kwargs):
"""
Initialize schemas with appropriate classes.
:param kwargs: pass through to base constructor (service and metric_type)
"""
schema = MlModelMetricSchema()
schema_collection = MlModelMetricSchema(many=True)
super().__init__(schema, schema_collection, **kwargs)
def build_query(self):
"""
Override method to include specific query parameters to this ml_model
endpoint.
"""
query = super().build_query()
query = query.filter(Metric.metric_id == MlModelMetric.metric_id)
algorithm = request.args.get('algorithm')
if algorithm is not None:
query = query.filter(MlModelMetric.algorithm == algorithm)
return query
class MetricResource(BaseResource):
"""
This resource handles the HTTP requests coming to the endpoint "/metrics/{metric_id}".
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, PUT, DELETE
"""
def get(self, metric_id):
"""
Implements the GET method for endpoint "/metrics/{metric_id}". It should be used
to get a single metric from the database.
:param metric_id: the metric_id associated with this endpoint
:return: the json object of metric found in the database (if it exists)
"""
metric = get_metric_by_id(metric_id)
return self.schema.jsonify(metric)
def put(self, metric_id):
"""
Implements the PUT method for endpoint "/metrics/{metric_id}". It should be used
to update a metric.
:param metric_id: the metric_id associated with this endpoint
:return: the metric as a json after the update (in case of success)
"""
json_data = request.get_json(force=True)
if not json_data:
abort(400, message='No input data provided')
metric = get_metric_by_id(metric_id)
self.load(json_data, metric, db.session, partial=True)
try:
db.session.commit()
except SQLAlchemyError as e:
abort(400, message=f'Database error. Reason: {e}')
return success(json_data)
def delete(self, metric_id):
"""
Implements the DELETE method for endpoint "/metrics/{metric_id}". It should be
        used to delete the metric matching the provided metric_id.
:param metric_id: the metric_id associated with this endpoint
:return: the metric as a json after the delete (in case of success)
"""
metric = get_metric_by_id(metric_id)
result = self.schema.dump(metric)
try:
db.session.delete(metric)
db.session.commit()
except SQLAlchemyError as e:
abort(400, message=f'Database error. Reason: {e}')
return success(result)
class QuantModelMetricResource(MetricResource):
"""
This resource handles the HTTP requests coming to the endpoint
"/quant_model/metrics/{metric_id}".
This subclass uses everything from the base class and only needs to specify the
appropriate schemas in the constructor.
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, PUT, DELETE
"""
def __init__(self, **kwargs):
"""
Initialize schemas with appropriate classes.
:param kwargs: pass through to base constructor (service and metric_type)
"""
schema = QuantModelMetricSchema()
schema_collection = QuantModelMetricSchema(many=True)
super().__init__(schema, schema_collection, **kwargs)
class MlModelMetricResource(MetricResource):
"""
This resource handles the HTTP requests coming to the endpoint
"/ml_model/metrics/{metric_id}".
This subclass uses everything from the base class and only needs to specify the
appropriate schemas in the constructor.
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, PUT, DELETE
"""
def __init__(self, **kwargs):
"""
Initialize schemas with appropriate classes.
:param kwargs: pass through to base constructor (service and metric_type)
"""
schema = MlModelMetricSchema()
schema_collection = MlModelMetricSchema(many=True)
super().__init__(schema, schema_collection, **kwargs)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class QuantModelMetricsResource(MetricsResource):
"""
This resource handles the HTTP requests coming to the endpoint
"/quant_model/metrics/{metric_id}".
This subclass uses almost everything from the base class, it only needs to specify the
appropriate schemas in the constructor, and to override the build_query method so that
the appropriate metric_type is filtered and the remaining query parameters (specific
to this endpoint) are processed.
Implemented Query Parameters:
- asset_class: to filter results by a given asset class.
- model_name: to filter results by a given model name.
- pricing_library: to filter results for a given pricing library.
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, POST
"""
def __init__(self, **kwargs):
"""
Initialize schemas with appropriate classes.
:param kwargs: pass through to base constructor (service and metric_type)
"""
schema = QuantModelMetricSchema()
schema_collection = QuantModelMetricSchema(many=True)
super().__init__(schema, schema_collection, **kwargs)
def build_query(self):
"""
Override method to include specific query parameters to this model endpoint.
"""
query = super().build_query()
query = query.filter(Metric.metric_id == QuantModelMetric.metric_id)
asset_class = request.args.get('asset_class')
model_name = request.args.get('model_name')
pricing_library = request.args.get('pricing_library')
if asset_class is not None:
query = query.filter(QuantModelMetric.asset_class == asset_class)
if model_name is not None:
query = query.filter(QuantModelMetric.model_name == model_name)
if pricing_library is not None:
query = query.filter(QuantModelMetric.pricing_library ==
pricing_library)
return query
class MlModelMetricsResource(MetricsResource):
"""
This resource handles the HTTP requests coming to the endpoint
"/ml_model/metrics/{metric_id}".
This subclass uses almost everything from the base class, it only needs to specify the
appropriate schemas in the constructor, and to override the build_query method so that
the appropriate metric_type is filtered and the remaining query parameters (specific
to this endpoint) are processed.
Implemented Query Parameters:
- algorithm: to filter results by a given algorithm.
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, POST
"""
def __init__(self, **kwargs):
"""
Initialize schemas with appropriate classes.
:param kwargs: pass through to base constructor (service and metric_type)
"""
schema = MlModelMetricSchema()
schema_collection = MlModelMetricSchema(many=True)
super().__init__(schema, schema_collection, **kwargs)
def build_query(self):
"""
Override method to include specific query parameters to this ml_model
endpoint.
"""
query = super().build_query()
query = query.filter(Metric.metric_id == MlModelMetric.metric_id)
algorithm = request.args.get('algorithm')
if algorithm is not None:
query = query.filter(MlModelMetric.algorithm == algorithm)
return query
class MetricResource(BaseResource):
"""
This resource handles the HTTP requests coming to the endpoint "/metrics/{metric_id}".
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, PUT, DELETE
"""
def get(self, metric_id):
"""
Implements the GET method for endpoint "/metrics/{metric_id}". It should be used
to get a single metric from the database.
:param metric_id: the metric_id associated with this endpoint
:return: the json object of metric found in the database (if it exists)
"""
metric = get_metric_by_id(metric_id)
return self.schema.jsonify(metric)
def put(self, metric_id):
"""
Implements the PUT method for endpoint "/metrics/{metric_id}". It should be used
to update a metric.
:param metric_id: the metric_id associated with this endpoint
:return: the metric as a json after the update (in case of success)
"""
json_data = request.get_json(force=True)
if not json_data:
abort(400, message='No input data provided')
metric = get_metric_by_id(metric_id)
self.load(json_data, metric, db.session, partial=True)
try:
db.session.commit()
except SQLAlchemyError as e:
abort(400, message=f'Database error. Reason: {e}')
return success(json_data)
def delete(self, metric_id):
"""
Implements the DELETE method for endpoint "/metrics/{metric_id}". It should be
        used to delete the metric matching the provided metric_id.
:param metric_id: the metric_id associated with this endpoint
:return: the metric as a json after the delete (in case of success)
"""
metric = get_metric_by_id(metric_id)
result = self.schema.dump(metric)
try:
db.session.delete(metric)
db.session.commit()
except SQLAlchemyError as e:
abort(400, message=f'Database error. Reason: {e}')
return success(result)
class QuantModelMetricResource(MetricResource):
"""
This resource handles the HTTP requests coming to the endpoint
"/quant_model/metrics/{metric_id}".
This subclass uses everything from the base class and only needs to specify the
appropriate schemas in the constructor.
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, PUT, DELETE
"""
def __init__(self, **kwargs):
"""
Initialize schemas with appropriate classes.
:param kwargs: pass through to base constructor (service and metric_type)
"""
schema = QuantModelMetricSchema()
schema_collection = QuantModelMetricSchema(many=True)
super().__init__(schema, schema_collection, **kwargs)
class MlModelMetricResource(MetricResource):
"""
This resource handles the HTTP requests coming to the endpoint
"/ml_model/metrics/{metric_id}".
This subclass uses everything from the base class and only needs to specify the
appropriate schemas in the constructor.
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, PUT, DELETE
"""
def __init__(self, **kwargs):
"""
Initialize schemas with appropriate classes.
:param kwargs: pass through to base constructor (service and metric_type)
"""
schema = MlModelMetricSchema()
schema_collection = MlModelMetricSchema(many=True)
super().__init__(schema, schema_collection, **kwargs)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MetricsResource(BaseResource):
<|reserved_special_token_0|>
def get(self):
"""
Implements the GET method for endpoint "/metrics". By default the results are
        ordered by 'metric_id' ascending.
Implemented Query Parameters:
- is_active: to filter results that are either active or inactive. Boolean and
case insensitive.
- frequency: filter results based on a metric frequency. Values of this enum must
be respected. Case insensitive.
- threshold_type: filter results based on a metric threshold type. Values of this
enum must be respected. Case insensitive.
        - sort: allows one to order the resulting collection by 'metric_id' in descending
order. This should be done by specifying the query parameter as "sort=-metric_id".
Case insensitive.
Note: if unknown query parameters are given these will be ignored.
:return: a collection of metrics
"""
query = self.build_query()
metrics = query.all()
result = self.schema_collection.dump(metrics)
return success(result)
def build_query(self):
"""
        Builds the query (without executing it) to be used in the GET method.
:return: query with all the query conditions specified for obtaining the metrics
that are in the database and respect the desired filters (query parameters).
"""
query = Metric.query.filter(Metric.metric_type == self.metric_type)
is_active = request.args.get('is_active')
frequency = request.args.get('frequency')
threshold_type = request.args.get('threshold_type')
sort = request.args.get('sort')
if is_active is not None:
is_active = is_active.lower() == 'true'
            query = query.filter_by(is_active=is_active)
if frequency is not None:
try:
frequency = Frequency.from_name(frequency)
except ValueError as e:
msg = (
f"Invalid 'frequency': {frequency}. Use one of {Frequency.values()}"
)
abort(400, message=msg)
query = query.filter_by(frequency=frequency)
if threshold_type is not None:
try:
threshold_type = ThresholdType.from_name(threshold_type)
except ValueError as e:
msg = (
f"Invalid 'threshold_type': {threshold_type}. Use one of {ThresholdType.values()}"
)
abort(400, message=msg)
query = query.filter_by(threshold_type=threshold_type)
if sort is not None and sort.lstrip('-') == 'metric_id':
query = query.order_by(Metric.metric_id.desc())
else:
query = query.order_by(Metric.metric_id)
return query
def post(self):
"""
Implements the POST method for endpoint "/metrics". It should be used to create a
new metric.
:return: the metric as a json created in the database (in case of success)
"""
json_data = request.get_json(force=True)
if not json_data:
abort(400, message='No input data provided')
json_data['metric_id'] = 'TBD'
json_data['metric_type'] = 'model'
new_metric = self.load(json_data, session=db.session)
try:
db.session.add(new_metric)
db.session.commit()
except SQLAlchemyError as e:
abort(400, message=f'Database error. Reason: {e}')
result = self.schema.dump(new_metric)
return success(result, code=201)
class QuantModelMetricsResource(MetricsResource):
"""
This resource handles the HTTP requests coming to the endpoint
"/quant_model/metrics/{metric_id}".
This subclass uses almost everything from the base class, it only needs to specify the
appropriate schemas in the constructor, and to override the build_query method so that
the appropriate metric_type is filtered and the remaining query parameters (specific
to this endpoint) are processed.
Implemented Query Parameters:
- asset_class: to filter results by a given asset class.
- model_name: to filter results by a given model name.
- pricing_library: to filter results for a given pricing library.
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, POST
"""
def __init__(self, **kwargs):
"""
Initialize schemas with appropriate classes.
:param kwargs: pass through to base constructor (service and metric_type)
"""
schema = QuantModelMetricSchema()
schema_collection = QuantModelMetricSchema(many=True)
super().__init__(schema, schema_collection, **kwargs)
def build_query(self):
"""
Override method to include specific query parameters to this model endpoint.
"""
query = super().build_query()
query = query.filter(Metric.metric_id == QuantModelMetric.metric_id)
asset_class = request.args.get('asset_class')
model_name = request.args.get('model_name')
pricing_library = request.args.get('pricing_library')
if asset_class is not None:
query = query.filter(QuantModelMetric.asset_class == asset_class)
if model_name is not None:
query = query.filter(QuantModelMetric.model_name == model_name)
if pricing_library is not None:
query = query.filter(QuantModelMetric.pricing_library ==
pricing_library)
return query
class MlModelMetricsResource(MetricsResource):
"""
This resource handles the HTTP requests coming to the endpoint
"/ml_model/metrics/{metric_id}".
This subclass uses almost everything from the base class, it only needs to specify the
appropriate schemas in the constructor, and to override the build_query method so that
the appropriate metric_type is filtered and the remaining query parameters (specific
to this endpoint) are processed.
Implemented Query Parameters:
- algorithm: to filter results by a given algorithm.
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, POST
"""
def __init__(self, **kwargs):
"""
Initialize schemas with appropriate classes.
:param kwargs: pass through to base constructor (service and metric_type)
"""
schema = MlModelMetricSchema()
schema_collection = MlModelMetricSchema(many=True)
super().__init__(schema, schema_collection, **kwargs)
def build_query(self):
"""
Override method to include specific query parameters to this ml_model
endpoint.
"""
query = super().build_query()
query = query.filter(Metric.metric_id == MlModelMetric.metric_id)
algorithm = request.args.get('algorithm')
if algorithm is not None:
query = query.filter(MlModelMetric.algorithm == algorithm)
return query
class MetricResource(BaseResource):
"""
This resource handles the HTTP requests coming to the endpoint "/metrics/{metric_id}".
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, PUT, DELETE
"""
def get(self, metric_id):
"""
Implements the GET method for endpoint "/metrics/{metric_id}". It should be used
to get a single metric from the database.
:param metric_id: the metric_id associated with this endpoint
:return: the json object of metric found in the database (if it exists)
"""
metric = get_metric_by_id(metric_id)
return self.schema.jsonify(metric)
def put(self, metric_id):
"""
Implements the PUT method for endpoint "/metrics/{metric_id}". It should be used
to update a metric.
:param metric_id: the metric_id associated with this endpoint
:return: the metric as a json after the update (in case of success)
"""
json_data = request.get_json(force=True)
if not json_data:
abort(400, message='No input data provided')
metric = get_metric_by_id(metric_id)
self.load(json_data, metric, db.session, partial=True)
try:
db.session.commit()
except SQLAlchemyError as e:
abort(400, message=f'Database error. Reason: {e}')
return success(json_data)
def delete(self, metric_id):
"""
Implements the DELETE method for endpoint "/metrics/{metric_id}". It should be
        used to delete the metric matching the provided metric_id.
:param metric_id: the metric_id associated with this endpoint
:return: the metric as a json after the delete (in case of success)
"""
metric = get_metric_by_id(metric_id)
result = self.schema.dump(metric)
try:
db.session.delete(metric)
db.session.commit()
except SQLAlchemyError as e:
abort(400, message=f'Database error. Reason: {e}')
return success(result)
class QuantModelMetricResource(MetricResource):
"""
This resource handles the HTTP requests coming to the endpoint
"/quant_model/metrics/{metric_id}".
This subclass uses everything from the base class and only needs to specify the
appropriate schemas in the constructor.
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, PUT, DELETE
"""
def __init__(self, **kwargs):
"""
Initialize schemas with appropriate classes.
:param kwargs: pass through to base constructor (service and metric_type)
"""
schema = QuantModelMetricSchema()
schema_collection = QuantModelMetricSchema(many=True)
super().__init__(schema, schema_collection, **kwargs)
class MlModelMetricResource(MetricResource):
"""
This resource handles the HTTP requests coming to the endpoint
"/ml_model/metrics/{metric_id}".
This subclass uses everything from the base class and only needs to specify the
appropriate schemas in the constructor.
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, PUT, DELETE
"""
def __init__(self, **kwargs):
"""
Initialize schemas with appropriate classes.
:param kwargs: pass through to base constructor (service and metric_type)
"""
schema = MlModelMetricSchema()
schema_collection = MlModelMetricSchema(many=True)
super().__init__(schema, schema_collection, **kwargs)
<|reserved_special_token_1|>
from flask import request
from flask_restful import abort
from sqlalchemy.exc import SQLAlchemyError
from gm.main.models.model import db, Metric, QuantModelMetricSchema, MlModelMetricSchema, Frequency, QuantModelMetric, MlModelMetric, ThresholdType
from gm.main.resources import success, get_metric_by_id, BaseResource
class MetricsResource(BaseResource):
"""
This resource handles the HTTP requests coming to the endpoint "/metrics".
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, POST
"""
def get(self):
"""
Implements the GET method for endpoint "/metrics". By default the results are
        ordered by 'metric_id' ascending.
Implemented Query Parameters:
- is_active: to filter results that are either active or inactive. Boolean and
case insensitive.
- frequency: filter results based on a metric frequency. Values of this enum must
be respected. Case insensitive.
- threshold_type: filter results based on a metric threshold type. Values of this
enum must be respected. Case insensitive.
        - sort: allows one to order the resulting collection by 'metric_id' in descending
order. This should be done by specifying the query parameter as "sort=-metric_id".
Case insensitive.
Note: if unknown query parameters are given these will be ignored.
:return: a collection of metrics
"""
query = self.build_query()
metrics = query.all()
result = self.schema_collection.dump(metrics)
return success(result)
def build_query(self):
"""
        Builds the query (without executing it) to be used in the GET method.
:return: query with all the query conditions specified for obtaining the metrics
that are in the database and respect the desired filters (query parameters).
"""
query = Metric.query.filter(Metric.metric_type == self.metric_type)
is_active = request.args.get('is_active')
frequency = request.args.get('frequency')
threshold_type = request.args.get('threshold_type')
sort = request.args.get('sort')
if is_active is not None:
is_active = is_active.lower() == 'true'
            query = query.filter_by(is_active=is_active)
if frequency is not None:
try:
frequency = Frequency.from_name(frequency)
except ValueError as e:
msg = (
f"Invalid 'frequency': {frequency}. Use one of {Frequency.values()}"
)
abort(400, message=msg)
query = query.filter_by(frequency=frequency)
if threshold_type is not None:
try:
threshold_type = ThresholdType.from_name(threshold_type)
except ValueError as e:
msg = (
f"Invalid 'threshold_type': {threshold_type}. Use one of {ThresholdType.values()}"
)
abort(400, message=msg)
query = query.filter_by(threshold_type=threshold_type)
if sort is not None and sort.lstrip('-') == 'metric_id':
query = query.order_by(Metric.metric_id.desc())
else:
query = query.order_by(Metric.metric_id)
return query
def post(self):
"""
Implements the POST method for endpoint "/metrics". It should be used to create a
new metric.
:return: the metric as a json created in the database (in case of success)
"""
json_data = request.get_json(force=True)
if not json_data:
abort(400, message='No input data provided')
json_data['metric_id'] = 'TBD'
json_data['metric_type'] = 'model'
new_metric = self.load(json_data, session=db.session)
try:
db.session.add(new_metric)
db.session.commit()
except SQLAlchemyError as e:
abort(400, message=f'Database error. Reason: {e}')
result = self.schema.dump(new_metric)
return success(result, code=201)
class QuantModelMetricsResource(MetricsResource):
"""
This resource handles the HTTP requests coming to the endpoint
"/quant_model/metrics/{metric_id}".
This subclass uses almost everything from the base class, it only needs to specify the
appropriate schemas in the constructor, and to override the build_query method so that
the appropriate metric_type is filtered and the remaining query parameters (specific
to this endpoint) are processed.
Implemented Query Parameters:
- asset_class: to filter results by a given asset class.
- model_name: to filter results by a given model name.
- pricing_library: to filter results for a given pricing library.
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, POST
"""
def __init__(self, **kwargs):
"""
Initialize schemas with appropriate classes.
:param kwargs: pass through to base constructor (service and metric_type)
"""
schema = QuantModelMetricSchema()
schema_collection = QuantModelMetricSchema(many=True)
super().__init__(schema, schema_collection, **kwargs)
def build_query(self):
"""
Override method to include specific query parameters to this model endpoint.
"""
query = super().build_query()
query = query.filter(Metric.metric_id == QuantModelMetric.metric_id)
asset_class = request.args.get('asset_class')
model_name = request.args.get('model_name')
pricing_library = request.args.get('pricing_library')
if asset_class is not None:
query = query.filter(QuantModelMetric.asset_class == asset_class)
if model_name is not None:
query = query.filter(QuantModelMetric.model_name == model_name)
if pricing_library is not None:
query = query.filter(QuantModelMetric.pricing_library ==
pricing_library)
return query
class MlModelMetricsResource(MetricsResource):
"""
This resource handles the HTTP requests coming to the endpoint
"/ml_model/metrics/{metric_id}".
This subclass uses almost everything from the base class, it only needs to specify the
appropriate schemas in the constructor, and to override the build_query method so that
the appropriate metric_type is filtered and the remaining query parameters (specific
to this endpoint) are processed.
Implemented Query Parameters:
- algorithm: to filter results by a given algorithm.
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, POST
"""
def __init__(self, **kwargs):
"""
Initialize schemas with appropriate classes.
:param kwargs: pass through to base constructor (service and metric_type)
"""
schema = MlModelMetricSchema()
schema_collection = MlModelMetricSchema(many=True)
super().__init__(schema, schema_collection, **kwargs)
def build_query(self):
"""
Override method to include specific query parameters to this ml_model
endpoint.
"""
query = super().build_query()
query = query.filter(Metric.metric_id == MlModelMetric.metric_id)
algorithm = request.args.get('algorithm')
if algorithm is not None:
query = query.filter(MlModelMetric.algorithm == algorithm)
return query
class MetricResource(BaseResource):
"""
This resource handles the HTTP requests coming to the endpoint "/metrics/{metric_id}".
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, PUT, DELETE
"""
def get(self, metric_id):
"""
Implements the GET method for endpoint "/metrics/{metric_id}". It should be used
to get a single metric from the database.
:param metric_id: the metric_id associated with this endpoint
:return: the json object of metric found in the database (if it exists)
"""
metric = get_metric_by_id(metric_id)
return self.schema.jsonify(metric)
def put(self, metric_id):
"""
Implements the PUT method for endpoint "/metrics/{metric_id}". It should be used
to update a metric.
:param metric_id: the metric_id associated with this endpoint
:return: the metric as a json after the update (in case of success)
"""
json_data = request.get_json(force=True)
if not json_data:
abort(400, message='No input data provided')
metric = get_metric_by_id(metric_id)
self.load(json_data, metric, db.session, partial=True)
try:
db.session.commit()
except SQLAlchemyError as e:
abort(400, message=f'Database error. Reason: {e}')
return success(json_data)
def delete(self, metric_id):
"""
Implements the DELETE method for endpoint "/metrics/{metric_id}". It should be
        used to delete the metric matching the provided metric_id.
:param metric_id: the metric_id associated with this endpoint
:return: the metric as a json after the delete (in case of success)
"""
metric = get_metric_by_id(metric_id)
result = self.schema.dump(metric)
try:
db.session.delete(metric)
db.session.commit()
except SQLAlchemyError as e:
abort(400, message=f'Database error. Reason: {e}')
return success(result)
class QuantModelMetricResource(MetricResource):
"""
This resource handles the HTTP requests coming to the endpoint
"/quant_model/metrics/{metric_id}".
This subclass uses everything from the base class and only needs to specify the
appropriate schemas in the constructor.
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, PUT, DELETE
"""
def __init__(self, **kwargs):
"""
Initialize schemas with appropriate classes.
:param kwargs: pass through to base constructor (service and metric_type)
"""
schema = QuantModelMetricSchema()
schema_collection = QuantModelMetricSchema(many=True)
super().__init__(schema, schema_collection, **kwargs)
class MlModelMetricResource(MetricResource):
"""
This resource handles the HTTP requests coming to the endpoint
"/ml_model/metrics/{metric_id}".
This subclass uses everything from the base class and only needs to specify the
appropriate schemas in the constructor.
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, PUT, DELETE
"""
def __init__(self, **kwargs):
"""
Initialize schemas with appropriate classes.
:param kwargs: pass through to base constructor (service and metric_type)
"""
schema = MlModelMetricSchema()
schema_collection = MlModelMetricSchema(many=True)
super().__init__(schema, schema_collection, **kwargs)
<|reserved_special_token_1|>
from flask import request
from flask_restful import abort
from sqlalchemy.exc import SQLAlchemyError
from gm.main.models.model import db, Metric, QuantModelMetricSchema, \
MlModelMetricSchema, Frequency, QuantModelMetric, MlModelMetric, \
ThresholdType
from gm.main.resources import success, get_metric_by_id, BaseResource
class MetricsResource(BaseResource):
"""
This resource handles the HTTP requests coming to the endpoint "/metrics".
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, POST
"""
def get(self):
"""
Implements the GET method for endpoint "/metrics". By default the results are
        ordered by 'metric_id' ascending.
Implemented Query Parameters:
- is_active: to filter results that are either active or inactive. Boolean and
case insensitive.
- frequency: filter results based on a metric frequency. Values of this enum must
be respected. Case insensitive.
- threshold_type: filter results based on a metric threshold type. Values of this
enum must be respected. Case insensitive.
        - sort: allows one to order the resulting collection by 'metric_id' in descending
order. This should be done by specifying the query parameter as "sort=-metric_id".
Case insensitive.
Note: if unknown query parameters are given these will be ignored.
:return: a collection of metrics
"""
query = self.build_query()
metrics = query.all()
result = self.schema_collection.dump(metrics)
return success(result)
def build_query(self):
"""
        Builds the query (without executing it) to be used in the GET method.
:return: query with all the query conditions specified for obtaining the metrics
that are in the database and respect the desired filters (query parameters).
"""
# this filter is required
query = Metric.query.filter(Metric.metric_type == self.metric_type)
# get query parameters (parameters which are not here are ignored)
is_active = request.args.get('is_active')
frequency = request.args.get('frequency')
threshold_type = request.args.get('threshold_type')
sort = request.args.get('sort')
# process each parameter, and if valid add it as a query condition
if is_active is not None:
is_active = is_active.lower() == 'true'
            query = query.filter_by(is_active=is_active)
if frequency is not None:
try:
frequency = Frequency.from_name(frequency)
except ValueError as e:
msg = f"Invalid 'frequency': {frequency}. Use one of {Frequency.values()}"
abort(400, message=msg)
query = query.filter_by(frequency=frequency)
if threshold_type is not None:
try:
threshold_type = ThresholdType.from_name(threshold_type)
except ValueError as e:
msg = f"Invalid 'threshold_type': {threshold_type}. Use one of " \
f"{ThresholdType.values()}"
abort(400, message=msg)
query = query.filter_by(threshold_type=threshold_type)
if sort is not None and sort.lstrip("-") == 'metric_id':
query = query.order_by(Metric.metric_id.desc())
else:
query = query.order_by(Metric.metric_id)
return query
def post(self):
"""
Implements the POST method for endpoint "/metrics". It should be used to create a
new metric.
:return: the metric as a json created in the database (in case of success)
"""
json_data = request.get_json(force=True)
if not json_data:
abort(400, message='No input data provided')
# make sure the metric_id (temporary) and metric_type (model) are filled
json_data["metric_id"] = "TBD"
json_data["metric_type"] = "model"
# validate and deserialize input
new_metric = self.load(json_data, session=db.session)
# get the next metric id and update metric object
try:
db.session.add(new_metric)
db.session.commit()
except SQLAlchemyError as e:
abort(400, message=f'Database error. Reason: {e}')
# dump to json and return result
result = self.schema.dump(new_metric)
return success(result, code=201)
class QuantModelMetricsResource(MetricsResource):
"""
This resource handles the HTTP requests coming to the endpoint
"/quant_model/metrics/{metric_id}".
This subclass uses almost everything from the base class, it only needs to specify the
appropriate schemas in the constructor, and to override the build_query method so that
the appropriate metric_type is filtered and the remaining query parameters (specific
to this endpoint) are processed.
Implemented Query Parameters:
- asset_class: to filter results by a given asset class.
- model_name: to filter results by a given model name.
- pricing_library: to filter results for a given pricing library.
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, POST
"""
def __init__(self, **kwargs):
"""
Initialize schemas with appropriate classes.
:param kwargs: pass through to base constructor (service and metric_type)
"""
schema = QuantModelMetricSchema()
schema_collection = QuantModelMetricSchema(many=True)
super().__init__(schema, schema_collection, **kwargs)
def build_query(self):
"""
Override method to include specific query parameters to this model endpoint.
"""
        # build query from the base class and add the condition required for joining with the parent
query = super().build_query()
query = query.filter(Metric.metric_id == QuantModelMetric.metric_id)
# get the remaining query parameters
asset_class = request.args.get('asset_class')
model_name = request.args.get('model_name')
pricing_library = request.args.get('pricing_library')
# process each parameter and, if valid, add as a query condition
if asset_class is not None:
query = query.filter(QuantModelMetric.asset_class == asset_class)
if model_name is not None:
query = query.filter(QuantModelMetric.model_name == model_name)
if pricing_library is not None:
query = query.filter(QuantModelMetric.pricing_library == pricing_library)
return query
class MlModelMetricsResource(MetricsResource):
"""
This resource handles the HTTP requests coming to the endpoint
"/ml_model/metrics/{metric_id}".
This subclass uses almost everything from the base class, it only needs to specify the
appropriate schemas in the constructor, and to override the build_query method so that
the appropriate metric_type is filtered and the remaining query parameters (specific
to this endpoint) are processed.
Implemented Query Parameters:
- algorithm: to filter results by a given algorithm.
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, POST
"""
def __init__(self, **kwargs):
"""
Initialize schemas with appropriate classes.
:param kwargs: pass through to base constructor (service and metric_type)
"""
schema = MlModelMetricSchema()
schema_collection = MlModelMetricSchema(many=True)
super().__init__(schema, schema_collection, **kwargs)
def build_query(self):
"""
Override method to include specific query parameters to this ml_model
endpoint.
"""
query = super().build_query()
query = query.filter(Metric.metric_id == MlModelMetric.metric_id)
algorithm = request.args.get('algorithm')
if algorithm is not None:
query = query.filter(MlModelMetric.algorithm == algorithm)
return query
class MetricResource(BaseResource):
"""
This resource handles the HTTP requests coming to the endpoint "/metrics/{metric_id}".
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, PUT, DELETE
"""
def get(self, metric_id):
"""
Implements the GET method for endpoint "/metrics/{metric_id}". It should be used
to get a single metric from the database.
:param metric_id: the metric_id associated with this endpoint
:return: the json object of metric found in the database (if it exists)
"""
metric = get_metric_by_id(metric_id)
return self.schema.jsonify(metric)
def put(self, metric_id):
"""
Implements the PUT method for endpoint "/metrics/{metric_id}". It should be used
to update a metric.
:param metric_id: the metric_id associated with this endpoint
:return: the metric as a json after the update (in case of success)
"""
json_data = request.get_json(force=True)
if not json_data:
abort(400, message='No input data provided')
# Validate and deserialize input
metric = get_metric_by_id(metric_id)
self.load(json_data, metric, db.session, partial=True)
# if it was found and deserialized successfully try to commit
try:
db.session.commit()
except SQLAlchemyError as e:
abort(400, message=f'Database error. Reason: {e}')
return success(json_data)
def delete(self, metric_id):
"""
Implements the DELETE method for endpoint "/metrics/{metric_id}". It should be
        used to delete the metric matching the provided metric_id.
:param metric_id: the metric_id associated with this endpoint
:return: the metric as a json after the delete (in case of success)
"""
metric = get_metric_by_id(metric_id)
# dump as json to send in the end if del is successful
result = self.schema.dump(metric)
# if result was found, delete it from database
try:
db.session.delete(metric)
db.session.commit()
except SQLAlchemyError as e:
abort(400, message=f'Database error. Reason: {e}')
return success(result)
class QuantModelMetricResource(MetricResource):
"""
This resource handles the HTTP requests coming to the endpoint
"/quant_model/metrics/{metric_id}".
This subclass uses everything from the base class and only needs to specify the
appropriate schemas in the constructor.
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, PUT, DELETE
"""
def __init__(self, **kwargs):
"""
Initialize schemas with appropriate classes.
:param kwargs: pass through to base constructor (service and metric_type)
"""
schema = QuantModelMetricSchema()
schema_collection = QuantModelMetricSchema(many=True)
super().__init__(schema, schema_collection, **kwargs)
class MlModelMetricResource(MetricResource):
"""
This resource handles the HTTP requests coming to the endpoint
"/ml_model/metrics/{metric_id}".
This subclass uses everything from the base class and only needs to specify the
appropriate schemas in the constructor.
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, PUT, DELETE
"""
def __init__(self, **kwargs):
"""
Initialize schemas with appropriate classes.
:param kwargs: pass through to base constructor (service and metric_type)
"""
schema = MlModelMetricSchema()
schema_collection = MlModelMetricSchema(many=True)
super().__init__(schema, schema_collection, **kwargs)
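Aside: a hedged sketch of how the resources above might be registered with Flask-RESTful. Both the URL rules and the metric_type values passed through resource_class_kwargs are assumptions inferred from the docstrings (the base constructor is only documented as accepting service and metric_type).

# Hypothetical registration of the resources defined above with Flask-RESTful.
from flask import Flask
from flask_restful import Api

app = Flask(__name__)
api = Api(app)

# Collection endpoints (GET list / POST create).
api.add_resource(QuantModelMetricsResource, '/quant_model/metrics',
                 resource_class_kwargs={'metric_type': 'model'})     # value assumed
api.add_resource(MlModelMetricsResource, '/ml_model/metrics',
                 resource_class_kwargs={'metric_type': 'ml_model'})  # value assumed

# Single-metric endpoints (GET / PUT / DELETE by metric_id).
api.add_resource(QuantModelMetricResource, '/quant_model/metrics/<metric_id>',
                 resource_class_kwargs={'metric_type': 'model'})
api.add_resource(MlModelMetricResource, '/ml_model/metrics/<metric_id>',
                 resource_class_kwargs={'metric_type': 'ml_model'})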
|
flexible
|
{
"blob_id": "1431a0049c05a99e0b68052f56bf8e2e3c48e1aa",
"index": 622,
"step-1": "<mask token>\n\n\nclass QuantModelMetricsResource(MetricsResource):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass MlModelMetricsResource(MetricsResource):\n \"\"\"\n This resource handles the HTTP requests coming to the endpoint\n \"/ml_model/metrics/{metric_id}\".\n\n This subclass uses almost everything from the base class, it only needs to specify the\n appropriate schemas in the constructor, and to override the build_query method so that\n the appropriate metric_type is filtered and the remaining query parameters (specific\n to this endpoint) are processed.\n\n Implemented Query Parameters:\n - algorithm: to filter results by a given algorithm.\n\n Note: no trailing slash (\"/\") should be used.\n\n Accepted HTTP methods: GET, POST\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initialize schemas with appropriate classes.\n\n :param kwargs: pass through to base constructor (service and metric_type)\n \"\"\"\n schema = MlModelMetricSchema()\n schema_collection = MlModelMetricSchema(many=True)\n super().__init__(schema, schema_collection, **kwargs)\n\n def build_query(self):\n \"\"\"\n Override method to include specific query parameters to this ml_model\n endpoint.\n \"\"\"\n query = super().build_query()\n query = query.filter(Metric.metric_id == MlModelMetric.metric_id)\n algorithm = request.args.get('algorithm')\n if algorithm is not None:\n query = query.filter(MlModelMetric.algorithm == algorithm)\n return query\n\n\nclass MetricResource(BaseResource):\n \"\"\"\n This resource handles the HTTP requests coming to the endpoint \"/metrics/{metric_id}\".\n\n Note: no trailing slash (\"/\") should be used.\n\n Accepted HTTP methods: GET, PUT, DELETE\n \"\"\"\n\n def get(self, metric_id):\n \"\"\"\n Implements the GET method for endpoint \"/metrics/{metric_id}\". It should be used\n to get a single metric from the database.\n\n :param metric_id: the metric_id associated with this endpoint\n :return: the json object of metric found in the database (if it exists)\n \"\"\"\n metric = get_metric_by_id(metric_id)\n return self.schema.jsonify(metric)\n\n def put(self, metric_id):\n \"\"\"\n Implements the PUT method for endpoint \"/metrics/{metric_id}\". It should be used\n to update a metric.\n\n :param metric_id: the metric_id associated with this endpoint\n :return: the metric as a json after the update (in case of success)\n \"\"\"\n json_data = request.get_json(force=True)\n if not json_data:\n abort(400, message='No input data provided')\n metric = get_metric_by_id(metric_id)\n self.load(json_data, metric, db.session, partial=True)\n try:\n db.session.commit()\n except SQLAlchemyError as e:\n abort(400, message=f'Database error. Reason: {e}')\n return success(json_data)\n\n def delete(self, metric_id):\n \"\"\"\n Implements the DELETE method for endpoint \"/metrics/{metric_id}\". It should be\n used to delete a metric result matching the provided metric_id and cob_date.\n\n :param metric_id: the metric_id associated with this endpoint\n :return: the metric as a json after the delete (in case of success)\n \"\"\"\n metric = get_metric_by_id(metric_id)\n result = self.schema.dump(metric)\n try:\n db.session.delete(metric)\n db.session.commit()\n except SQLAlchemyError as e:\n abort(400, message=f'Database error. 
Reason: {e}')\n return success(result)\n\n\nclass QuantModelMetricResource(MetricResource):\n \"\"\"\n This resource handles the HTTP requests coming to the endpoint\n \"/quant_model/metrics/{metric_id}\".\n\n This subclass uses everything from the base class and only needs to specify the\n appropriate schemas in the constructor.\n\n Note: no trailing slash (\"/\") should be used.\n\n Accepted HTTP methods: GET, PUT, DELETE\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initialize schemas with appropriate classes.\n\n :param kwargs: pass through to base constructor (service and metric_type)\n \"\"\"\n schema = QuantModelMetricSchema()\n schema_collection = QuantModelMetricSchema(many=True)\n super().__init__(schema, schema_collection, **kwargs)\n\n\nclass MlModelMetricResource(MetricResource):\n \"\"\"\n This resource handles the HTTP requests coming to the endpoint\n \"/ml_model/metrics/{metric_id}\".\n\n This subclass uses everything from the base class and only needs to specify the\n appropriate schemas in the constructor.\n\n Note: no trailing slash (\"/\") should be used.\n\n Accepted HTTP methods: GET, PUT, DELETE\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initialize schemas with appropriate classes.\n\n :param kwargs: pass through to base constructor (service and metric_type)\n \"\"\"\n schema = MlModelMetricSchema()\n schema_collection = MlModelMetricSchema(many=True)\n super().__init__(schema, schema_collection, **kwargs)\n",
"step-2": "<mask token>\n\n\nclass QuantModelMetricsResource(MetricsResource):\n \"\"\"\n This resource handles the HTTP requests coming to the endpoint\n \"/quant_model/metrics/{metric_id}\".\n\n This subclass uses almost everything from the base class, it only needs to specify the\n appropriate schemas in the constructor, and to override the build_query method so that\n the appropriate metric_type is filtered and the remaining query parameters (specific\n to this endpoint) are processed.\n\n Implemented Query Parameters:\n - asset_class: to filter results by a given asset class.\n - model_name: to filter results by a given model name.\n - pricing_library: to filter results for a given pricing library.\n\n Note: no trailing slash (\"/\") should be used.\n\n Accepted HTTP methods: GET, POST\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initialize schemas with appropriate classes.\n\n :param kwargs: pass through to base constructor (service and metric_type)\n \"\"\"\n schema = QuantModelMetricSchema()\n schema_collection = QuantModelMetricSchema(many=True)\n super().__init__(schema, schema_collection, **kwargs)\n\n def build_query(self):\n \"\"\"\n Override method to include specific query parameters to this model endpoint.\n \"\"\"\n query = super().build_query()\n query = query.filter(Metric.metric_id == QuantModelMetric.metric_id)\n asset_class = request.args.get('asset_class')\n model_name = request.args.get('model_name')\n pricing_library = request.args.get('pricing_library')\n if asset_class is not None:\n query = query.filter(QuantModelMetric.asset_class == asset_class)\n if model_name is not None:\n query = query.filter(QuantModelMetric.model_name == model_name)\n if pricing_library is not None:\n query = query.filter(QuantModelMetric.pricing_library ==\n pricing_library)\n return query\n\n\nclass MlModelMetricsResource(MetricsResource):\n \"\"\"\n This resource handles the HTTP requests coming to the endpoint\n \"/ml_model/metrics/{metric_id}\".\n\n This subclass uses almost everything from the base class, it only needs to specify the\n appropriate schemas in the constructor, and to override the build_query method so that\n the appropriate metric_type is filtered and the remaining query parameters (specific\n to this endpoint) are processed.\n\n Implemented Query Parameters:\n - algorithm: to filter results by a given algorithm.\n\n Note: no trailing slash (\"/\") should be used.\n\n Accepted HTTP methods: GET, POST\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initialize schemas with appropriate classes.\n\n :param kwargs: pass through to base constructor (service and metric_type)\n \"\"\"\n schema = MlModelMetricSchema()\n schema_collection = MlModelMetricSchema(many=True)\n super().__init__(schema, schema_collection, **kwargs)\n\n def build_query(self):\n \"\"\"\n Override method to include specific query parameters to this ml_model\n endpoint.\n \"\"\"\n query = super().build_query()\n query = query.filter(Metric.metric_id == MlModelMetric.metric_id)\n algorithm = request.args.get('algorithm')\n if algorithm is not None:\n query = query.filter(MlModelMetric.algorithm == algorithm)\n return query\n\n\nclass MetricResource(BaseResource):\n \"\"\"\n This resource handles the HTTP requests coming to the endpoint \"/metrics/{metric_id}\".\n\n Note: no trailing slash (\"/\") should be used.\n\n Accepted HTTP methods: GET, PUT, DELETE\n \"\"\"\n\n def get(self, metric_id):\n \"\"\"\n Implements the GET method for endpoint \"/metrics/{metric_id}\". 
It should be used\n to get a single metric from the database.\n\n :param metric_id: the metric_id associated with this endpoint\n :return: the json object of metric found in the database (if it exists)\n \"\"\"\n metric = get_metric_by_id(metric_id)\n return self.schema.jsonify(metric)\n\n def put(self, metric_id):\n \"\"\"\n Implements the PUT method for endpoint \"/metrics/{metric_id}\". It should be used\n to update a metric.\n\n :param metric_id: the metric_id associated with this endpoint\n :return: the metric as a json after the update (in case of success)\n \"\"\"\n json_data = request.get_json(force=True)\n if not json_data:\n abort(400, message='No input data provided')\n metric = get_metric_by_id(metric_id)\n self.load(json_data, metric, db.session, partial=True)\n try:\n db.session.commit()\n except SQLAlchemyError as e:\n abort(400, message=f'Database error. Reason: {e}')\n return success(json_data)\n\n def delete(self, metric_id):\n \"\"\"\n Implements the DELETE method for endpoint \"/metrics/{metric_id}\". It should be\n used to delete a metric result matching the provided metric_id and cob_date.\n\n :param metric_id: the metric_id associated with this endpoint\n :return: the metric as a json after the delete (in case of success)\n \"\"\"\n metric = get_metric_by_id(metric_id)\n result = self.schema.dump(metric)\n try:\n db.session.delete(metric)\n db.session.commit()\n except SQLAlchemyError as e:\n abort(400, message=f'Database error. Reason: {e}')\n return success(result)\n\n\nclass QuantModelMetricResource(MetricResource):\n \"\"\"\n This resource handles the HTTP requests coming to the endpoint\n \"/quant_model/metrics/{metric_id}\".\n\n This subclass uses everything from the base class and only needs to specify the\n appropriate schemas in the constructor.\n\n Note: no trailing slash (\"/\") should be used.\n\n Accepted HTTP methods: GET, PUT, DELETE\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initialize schemas with appropriate classes.\n\n :param kwargs: pass through to base constructor (service and metric_type)\n \"\"\"\n schema = QuantModelMetricSchema()\n schema_collection = QuantModelMetricSchema(many=True)\n super().__init__(schema, schema_collection, **kwargs)\n\n\nclass MlModelMetricResource(MetricResource):\n \"\"\"\n This resource handles the HTTP requests coming to the endpoint\n \"/ml_model/metrics/{metric_id}\".\n\n This subclass uses everything from the base class and only needs to specify the\n appropriate schemas in the constructor.\n\n Note: no trailing slash (\"/\") should be used.\n\n Accepted HTTP methods: GET, PUT, DELETE\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initialize schemas with appropriate classes.\n\n :param kwargs: pass through to base constructor (service and metric_type)\n \"\"\"\n schema = MlModelMetricSchema()\n schema_collection = MlModelMetricSchema(many=True)\n super().__init__(schema, schema_collection, **kwargs)\n",
"step-3": "<mask token>\n\n\nclass MetricsResource(BaseResource):\n <mask token>\n\n def get(self):\n \"\"\"\n Implements the GET method for endpoint \"/metrics\". By default the results are\n order by 'metric_id' ascending.\n\n Implemented Query Parameters:\n - is_active: to filter results that are either active or inactive. Boolean and\n case insensitive.\n - frequency: filter results based on a metric frequency. Values of this enum must\n be respected. Case insensitive.\n - threshold_type: filter results based on a metric threshold type. Values of this\n enum must be respected. Case insensitive.\n - sort: allows one to order the resulting collecting by 'metric_id' in descending\n order. This should be done by specifying the query parameter as \"sort=-metric_id\".\n Case insensitive.\n\n Note: if unknown query parameters are given these will be ignored.\n\n :return: a collection of metrics\n \"\"\"\n query = self.build_query()\n metrics = query.all()\n result = self.schema_collection.dump(metrics)\n return success(result)\n\n def build_query(self):\n \"\"\"\n Builds the query (without executing it) to the be used in the GET method.\n :return: query with all the query conditions specified for obtaining the metrics\n that are in the database and respect the desired filters (query parameters).\n \"\"\"\n query = Metric.query.filter(Metric.metric_type == self.metric_type)\n is_active = request.args.get('is_active')\n frequency = request.args.get('frequency')\n threshold_type = request.args.get('threshold_type')\n sort = request.args.get('sort')\n if is_active is not None:\n is_active = is_active.lower() == 'true'\n query = Metric.query.filter_by(is_active=is_active)\n if frequency is not None:\n try:\n frequency = Frequency.from_name(frequency)\n except ValueError as e:\n msg = (\n f\"Invalid 'frequency': {frequency}. Use one of {Frequency.values()}\"\n )\n abort(400, message=msg)\n query = query.filter_by(frequency=frequency)\n if threshold_type is not None:\n try:\n threshold_type = ThresholdType.from_name(threshold_type)\n except ValueError as e:\n msg = (\n f\"Invalid 'threshold_type': {threshold_type}. Use one of {ThresholdType.values()}\"\n )\n abort(400, message=msg)\n query = query.filter_by(threshold_type=threshold_type)\n if sort is not None and sort.lstrip('-') == 'metric_id':\n query = query.order_by(Metric.metric_id.desc())\n else:\n query = query.order_by(Metric.metric_id)\n return query\n\n def post(self):\n \"\"\"\n Implements the POST method for endpoint \"/metrics\". It should be used to create a\n new metric.\n\n :return: the metric as a json created in the database (in case of success)\n \"\"\"\n json_data = request.get_json(force=True)\n if not json_data:\n abort(400, message='No input data provided')\n json_data['metric_id'] = 'TBD'\n json_data['metric_type'] = 'model'\n new_metric = self.load(json_data, session=db.session)\n try:\n db.session.add(new_metric)\n db.session.commit()\n except SQLAlchemyError as e:\n abort(400, message=f'Database error. 
Reason: {e}')\n result = self.schema.dump(new_metric)\n return success(result, code=201)\n\n\nclass QuantModelMetricsResource(MetricsResource):\n \"\"\"\n This resource handles the HTTP requests coming to the endpoint\n \"/quant_model/metrics/{metric_id}\".\n\n This subclass uses almost everything from the base class, it only needs to specify the\n appropriate schemas in the constructor, and to override the build_query method so that\n the appropriate metric_type is filtered and the remaining query parameters (specific\n to this endpoint) are processed.\n\n Implemented Query Parameters:\n - asset_class: to filter results by a given asset class.\n - model_name: to filter results by a given model name.\n - pricing_library: to filter results for a given pricing library.\n\n Note: no trailing slash (\"/\") should be used.\n\n Accepted HTTP methods: GET, POST\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initialize schemas with appropriate classes.\n\n :param kwargs: pass through to base constructor (service and metric_type)\n \"\"\"\n schema = QuantModelMetricSchema()\n schema_collection = QuantModelMetricSchema(many=True)\n super().__init__(schema, schema_collection, **kwargs)\n\n def build_query(self):\n \"\"\"\n Override method to include specific query parameters to this model endpoint.\n \"\"\"\n query = super().build_query()\n query = query.filter(Metric.metric_id == QuantModelMetric.metric_id)\n asset_class = request.args.get('asset_class')\n model_name = request.args.get('model_name')\n pricing_library = request.args.get('pricing_library')\n if asset_class is not None:\n query = query.filter(QuantModelMetric.asset_class == asset_class)\n if model_name is not None:\n query = query.filter(QuantModelMetric.model_name == model_name)\n if pricing_library is not None:\n query = query.filter(QuantModelMetric.pricing_library ==\n pricing_library)\n return query\n\n\nclass MlModelMetricsResource(MetricsResource):\n \"\"\"\n This resource handles the HTTP requests coming to the endpoint\n \"/ml_model/metrics/{metric_id}\".\n\n This subclass uses almost everything from the base class, it only needs to specify the\n appropriate schemas in the constructor, and to override the build_query method so that\n the appropriate metric_type is filtered and the remaining query parameters (specific\n to this endpoint) are processed.\n\n Implemented Query Parameters:\n - algorithm: to filter results by a given algorithm.\n\n Note: no trailing slash (\"/\") should be used.\n\n Accepted HTTP methods: GET, POST\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initialize schemas with appropriate classes.\n\n :param kwargs: pass through to base constructor (service and metric_type)\n \"\"\"\n schema = MlModelMetricSchema()\n schema_collection = MlModelMetricSchema(many=True)\n super().__init__(schema, schema_collection, **kwargs)\n\n def build_query(self):\n \"\"\"\n Override method to include specific query parameters to this ml_model\n endpoint.\n \"\"\"\n query = super().build_query()\n query = query.filter(Metric.metric_id == MlModelMetric.metric_id)\n algorithm = request.args.get('algorithm')\n if algorithm is not None:\n query = query.filter(MlModelMetric.algorithm == algorithm)\n return query\n\n\nclass MetricResource(BaseResource):\n \"\"\"\n This resource handles the HTTP requests coming to the endpoint \"/metrics/{metric_id}\".\n\n Note: no trailing slash (\"/\") should be used.\n\n Accepted HTTP methods: GET, PUT, DELETE\n \"\"\"\n\n def get(self, metric_id):\n \"\"\"\n Implements the GET 
method for endpoint \"/metrics/{metric_id}\". It should be used\n to get a single metric from the database.\n\n :param metric_id: the metric_id associated with this endpoint\n :return: the json object of metric found in the database (if it exists)\n \"\"\"\n metric = get_metric_by_id(metric_id)\n return self.schema.jsonify(metric)\n\n def put(self, metric_id):\n \"\"\"\n Implements the PUT method for endpoint \"/metrics/{metric_id}\". It should be used\n to update a metric.\n\n :param metric_id: the metric_id associated with this endpoint\n :return: the metric as a json after the update (in case of success)\n \"\"\"\n json_data = request.get_json(force=True)\n if not json_data:\n abort(400, message='No input data provided')\n metric = get_metric_by_id(metric_id)\n self.load(json_data, metric, db.session, partial=True)\n try:\n db.session.commit()\n except SQLAlchemyError as e:\n abort(400, message=f'Database error. Reason: {e}')\n return success(json_data)\n\n def delete(self, metric_id):\n \"\"\"\n Implements the DELETE method for endpoint \"/metrics/{metric_id}\". It should be\n used to delete a metric result matching the provided metric_id and cob_date.\n\n :param metric_id: the metric_id associated with this endpoint\n :return: the metric as a json after the delete (in case of success)\n \"\"\"\n metric = get_metric_by_id(metric_id)\n result = self.schema.dump(metric)\n try:\n db.session.delete(metric)\n db.session.commit()\n except SQLAlchemyError as e:\n abort(400, message=f'Database error. Reason: {e}')\n return success(result)\n\n\nclass QuantModelMetricResource(MetricResource):\n \"\"\"\n This resource handles the HTTP requests coming to the endpoint\n \"/quant_model/metrics/{metric_id}\".\n\n This subclass uses everything from the base class and only needs to specify the\n appropriate schemas in the constructor.\n\n Note: no trailing slash (\"/\") should be used.\n\n Accepted HTTP methods: GET, PUT, DELETE\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initialize schemas with appropriate classes.\n\n :param kwargs: pass through to base constructor (service and metric_type)\n \"\"\"\n schema = QuantModelMetricSchema()\n schema_collection = QuantModelMetricSchema(many=True)\n super().__init__(schema, schema_collection, **kwargs)\n\n\nclass MlModelMetricResource(MetricResource):\n \"\"\"\n This resource handles the HTTP requests coming to the endpoint\n \"/ml_model/metrics/{metric_id}\".\n\n This subclass uses everything from the base class and only needs to specify the\n appropriate schemas in the constructor.\n\n Note: no trailing slash (\"/\") should be used.\n\n Accepted HTTP methods: GET, PUT, DELETE\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initialize schemas with appropriate classes.\n\n :param kwargs: pass through to base constructor (service and metric_type)\n \"\"\"\n schema = MlModelMetricSchema()\n schema_collection = MlModelMetricSchema(many=True)\n super().__init__(schema, schema_collection, **kwargs)\n",
"step-4": "from flask import request\nfrom flask_restful import abort\nfrom sqlalchemy.exc import SQLAlchemyError\nfrom gm.main.models.model import db, Metric, QuantModelMetricSchema, MlModelMetricSchema, Frequency, QuantModelMetric, MlModelMetric, ThresholdType\nfrom gm.main.resources import success, get_metric_by_id, BaseResource\n\n\nclass MetricsResource(BaseResource):\n \"\"\"\n This resource handles the HTTP requests coming to the endpoint \"/metrics\".\n\n Note: no trailing slash (\"/\") should be used.\n\n Accepted HTTP methods: GET, POST\n \"\"\"\n\n def get(self):\n \"\"\"\n Implements the GET method for endpoint \"/metrics\". By default the results are\n order by 'metric_id' ascending.\n\n Implemented Query Parameters:\n - is_active: to filter results that are either active or inactive. Boolean and\n case insensitive.\n - frequency: filter results based on a metric frequency. Values of this enum must\n be respected. Case insensitive.\n - threshold_type: filter results based on a metric threshold type. Values of this\n enum must be respected. Case insensitive.\n - sort: allows one to order the resulting collecting by 'metric_id' in descending\n order. This should be done by specifying the query parameter as \"sort=-metric_id\".\n Case insensitive.\n\n Note: if unknown query parameters are given these will be ignored.\n\n :return: a collection of metrics\n \"\"\"\n query = self.build_query()\n metrics = query.all()\n result = self.schema_collection.dump(metrics)\n return success(result)\n\n def build_query(self):\n \"\"\"\n Builds the query (without executing it) to the be used in the GET method.\n :return: query with all the query conditions specified for obtaining the metrics\n that are in the database and respect the desired filters (query parameters).\n \"\"\"\n query = Metric.query.filter(Metric.metric_type == self.metric_type)\n is_active = request.args.get('is_active')\n frequency = request.args.get('frequency')\n threshold_type = request.args.get('threshold_type')\n sort = request.args.get('sort')\n if is_active is not None:\n is_active = is_active.lower() == 'true'\n query = Metric.query.filter_by(is_active=is_active)\n if frequency is not None:\n try:\n frequency = Frequency.from_name(frequency)\n except ValueError as e:\n msg = (\n f\"Invalid 'frequency': {frequency}. Use one of {Frequency.values()}\"\n )\n abort(400, message=msg)\n query = query.filter_by(frequency=frequency)\n if threshold_type is not None:\n try:\n threshold_type = ThresholdType.from_name(threshold_type)\n except ValueError as e:\n msg = (\n f\"Invalid 'threshold_type': {threshold_type}. Use one of {ThresholdType.values()}\"\n )\n abort(400, message=msg)\n query = query.filter_by(threshold_type=threshold_type)\n if sort is not None and sort.lstrip('-') == 'metric_id':\n query = query.order_by(Metric.metric_id.desc())\n else:\n query = query.order_by(Metric.metric_id)\n return query\n\n def post(self):\n \"\"\"\n Implements the POST method for endpoint \"/metrics\". It should be used to create a\n new metric.\n\n :return: the metric as a json created in the database (in case of success)\n \"\"\"\n json_data = request.get_json(force=True)\n if not json_data:\n abort(400, message='No input data provided')\n json_data['metric_id'] = 'TBD'\n json_data['metric_type'] = 'model'\n new_metric = self.load(json_data, session=db.session)\n try:\n db.session.add(new_metric)\n db.session.commit()\n except SQLAlchemyError as e:\n abort(400, message=f'Database error. 
Reason: {e}')\n result = self.schema.dump(new_metric)\n return success(result, code=201)\n\n\nclass QuantModelMetricsResource(MetricsResource):\n \"\"\"\n This resource handles the HTTP requests coming to the endpoint\n \"/quant_model/metrics/{metric_id}\".\n\n This subclass uses almost everything from the base class, it only needs to specify the\n appropriate schemas in the constructor, and to override the build_query method so that\n the appropriate metric_type is filtered and the remaining query parameters (specific\n to this endpoint) are processed.\n\n Implemented Query Parameters:\n - asset_class: to filter results by a given asset class.\n - model_name: to filter results by a given model name.\n - pricing_library: to filter results for a given pricing library.\n\n Note: no trailing slash (\"/\") should be used.\n\n Accepted HTTP methods: GET, POST\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initialize schemas with appropriate classes.\n\n :param kwargs: pass through to base constructor (service and metric_type)\n \"\"\"\n schema = QuantModelMetricSchema()\n schema_collection = QuantModelMetricSchema(many=True)\n super().__init__(schema, schema_collection, **kwargs)\n\n def build_query(self):\n \"\"\"\n Override method to include specific query parameters to this model endpoint.\n \"\"\"\n query = super().build_query()\n query = query.filter(Metric.metric_id == QuantModelMetric.metric_id)\n asset_class = request.args.get('asset_class')\n model_name = request.args.get('model_name')\n pricing_library = request.args.get('pricing_library')\n if asset_class is not None:\n query = query.filter(QuantModelMetric.asset_class == asset_class)\n if model_name is not None:\n query = query.filter(QuantModelMetric.model_name == model_name)\n if pricing_library is not None:\n query = query.filter(QuantModelMetric.pricing_library ==\n pricing_library)\n return query\n\n\nclass MlModelMetricsResource(MetricsResource):\n \"\"\"\n This resource handles the HTTP requests coming to the endpoint\n \"/ml_model/metrics/{metric_id}\".\n\n This subclass uses almost everything from the base class, it only needs to specify the\n appropriate schemas in the constructor, and to override the build_query method so that\n the appropriate metric_type is filtered and the remaining query parameters (specific\n to this endpoint) are processed.\n\n Implemented Query Parameters:\n - algorithm: to filter results by a given algorithm.\n\n Note: no trailing slash (\"/\") should be used.\n\n Accepted HTTP methods: GET, POST\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initialize schemas with appropriate classes.\n\n :param kwargs: pass through to base constructor (service and metric_type)\n \"\"\"\n schema = MlModelMetricSchema()\n schema_collection = MlModelMetricSchema(many=True)\n super().__init__(schema, schema_collection, **kwargs)\n\n def build_query(self):\n \"\"\"\n Override method to include specific query parameters to this ml_model\n endpoint.\n \"\"\"\n query = super().build_query()\n query = query.filter(Metric.metric_id == MlModelMetric.metric_id)\n algorithm = request.args.get('algorithm')\n if algorithm is not None:\n query = query.filter(MlModelMetric.algorithm == algorithm)\n return query\n\n\nclass MetricResource(BaseResource):\n \"\"\"\n This resource handles the HTTP requests coming to the endpoint \"/metrics/{metric_id}\".\n\n Note: no trailing slash (\"/\") should be used.\n\n Accepted HTTP methods: GET, PUT, DELETE\n \"\"\"\n\n def get(self, metric_id):\n \"\"\"\n Implements the GET 
method for endpoint \"/metrics/{metric_id}\". It should be used\n to get a single metric from the database.\n\n :param metric_id: the metric_id associated with this endpoint\n :return: the json object of metric found in the database (if it exists)\n \"\"\"\n metric = get_metric_by_id(metric_id)\n return self.schema.jsonify(metric)\n\n def put(self, metric_id):\n \"\"\"\n Implements the PUT method for endpoint \"/metrics/{metric_id}\". It should be used\n to update a metric.\n\n :param metric_id: the metric_id associated with this endpoint\n :return: the metric as a json after the update (in case of success)\n \"\"\"\n json_data = request.get_json(force=True)\n if not json_data:\n abort(400, message='No input data provided')\n metric = get_metric_by_id(metric_id)\n self.load(json_data, metric, db.session, partial=True)\n try:\n db.session.commit()\n except SQLAlchemyError as e:\n abort(400, message=f'Database error. Reason: {e}')\n return success(json_data)\n\n def delete(self, metric_id):\n \"\"\"\n Implements the DELETE method for endpoint \"/metrics/{metric_id}\". It should be\n used to delete a metric result matching the provided metric_id and cob_date.\n\n :param metric_id: the metric_id associated with this endpoint\n :return: the metric as a json after the delete (in case of success)\n \"\"\"\n metric = get_metric_by_id(metric_id)\n result = self.schema.dump(metric)\n try:\n db.session.delete(metric)\n db.session.commit()\n except SQLAlchemyError as e:\n abort(400, message=f'Database error. Reason: {e}')\n return success(result)\n\n\nclass QuantModelMetricResource(MetricResource):\n \"\"\"\n This resource handles the HTTP requests coming to the endpoint\n \"/quant_model/metrics/{metric_id}\".\n\n This subclass uses everything from the base class and only needs to specify the\n appropriate schemas in the constructor.\n\n Note: no trailing slash (\"/\") should be used.\n\n Accepted HTTP methods: GET, PUT, DELETE\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initialize schemas with appropriate classes.\n\n :param kwargs: pass through to base constructor (service and metric_type)\n \"\"\"\n schema = QuantModelMetricSchema()\n schema_collection = QuantModelMetricSchema(many=True)\n super().__init__(schema, schema_collection, **kwargs)\n\n\nclass MlModelMetricResource(MetricResource):\n \"\"\"\n This resource handles the HTTP requests coming to the endpoint\n \"/ml_model/metrics/{metric_id}\".\n\n This subclass uses everything from the base class and only needs to specify the\n appropriate schemas in the constructor.\n\n Note: no trailing slash (\"/\") should be used.\n\n Accepted HTTP methods: GET, PUT, DELETE\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initialize schemas with appropriate classes.\n\n :param kwargs: pass through to base constructor (service and metric_type)\n \"\"\"\n schema = MlModelMetricSchema()\n schema_collection = MlModelMetricSchema(many=True)\n super().__init__(schema, schema_collection, **kwargs)\n",
"step-5": "from flask import request\r\nfrom flask_restful import abort\r\nfrom sqlalchemy.exc import SQLAlchemyError\r\n\r\nfrom gm.main.models.model import db, Metric, QuantModelMetricSchema, \\\r\n MlModelMetricSchema, Frequency, QuantModelMetric, MlModelMetric, \\\r\n ThresholdType\r\nfrom gm.main.resources import success, get_metric_by_id, BaseResource\r\n\r\n\r\nclass MetricsResource(BaseResource):\r\n \"\"\"\r\n This resource handles the HTTP requests coming to the endpoint \"/metrics\".\r\n\r\n Note: no trailing slash (\"/\") should be used.\r\n\r\n Accepted HTTP methods: GET, POST\r\n \"\"\"\r\n\r\n def get(self):\r\n \"\"\"\r\n Implements the GET method for endpoint \"/metrics\". By default the results are\r\n order by 'metric_id' ascending.\r\n\r\n Implemented Query Parameters:\r\n - is_active: to filter results that are either active or inactive. Boolean and\r\n case insensitive.\r\n - frequency: filter results based on a metric frequency. Values of this enum must\r\n be respected. Case insensitive.\r\n - threshold_type: filter results based on a metric threshold type. Values of this\r\n enum must be respected. Case insensitive.\r\n - sort: allows one to order the resulting collecting by 'metric_id' in descending\r\n order. This should be done by specifying the query parameter as \"sort=-metric_id\".\r\n Case insensitive.\r\n\r\n Note: if unknown query parameters are given these will be ignored.\r\n\r\n :return: a collection of metrics\r\n \"\"\"\r\n query = self.build_query()\r\n metrics = query.all()\r\n result = self.schema_collection.dump(metrics)\r\n return success(result)\r\n\r\n def build_query(self):\r\n \"\"\"\r\n Builds the query (without executing it) to the be used in the GET method.\r\n :return: query with all the query conditions specified for obtaining the metrics\r\n that are in the database and respect the desired filters (query parameters).\r\n \"\"\"\r\n\r\n # this filter is required\r\n query = Metric.query.filter(Metric.metric_type == self.metric_type)\r\n\r\n # get query parameters (parameters which are not here are ignored)\r\n is_active = request.args.get('is_active')\r\n frequency = request.args.get('frequency')\r\n threshold_type = request.args.get('threshold_type')\r\n sort = request.args.get('sort')\r\n\r\n # process each parameter, and if valid add it as a query condition\r\n if is_active is not None:\r\n is_active = is_active.lower() == 'true'\r\n query = Metric.query.filter_by(is_active=is_active)\r\n if frequency is not None:\r\n try:\r\n frequency = Frequency.from_name(frequency)\r\n except ValueError as e:\r\n msg = f\"Invalid 'frequency': {frequency}. Use one of {Frequency.values()}\"\r\n abort(400, message=msg)\r\n query = query.filter_by(frequency=frequency)\r\n if threshold_type is not None:\r\n try:\r\n threshold_type = ThresholdType.from_name(threshold_type)\r\n except ValueError as e:\r\n msg = f\"Invalid 'threshold_type': {threshold_type}. Use one of \" \\\r\n f\"{ThresholdType.values()}\"\r\n abort(400, message=msg)\r\n query = query.filter_by(threshold_type=threshold_type)\r\n if sort is not None and sort.lstrip(\"-\") == 'metric_id':\r\n query = query.order_by(Metric.metric_id.desc())\r\n else:\r\n query = query.order_by(Metric.metric_id)\r\n\r\n return query\r\n\r\n\r\n def post(self):\r\n \"\"\"\r\n Implements the POST method for endpoint \"/metrics\". 
It should be used to create a\r\n new metric.\r\n\r\n :return: the metric as a json created in the database (in case of success)\r\n \"\"\"\r\n json_data = request.get_json(force=True)\r\n if not json_data:\r\n abort(400, message='No input data provided')\r\n # make sure the metric_id (temporary) and metric_type (model) are filled\r\n json_data[\"metric_id\"] = \"TBD\"\r\n json_data[\"metric_type\"] = \"model\"\r\n\r\n # validate and deserialize input\r\n new_metric = self.load(json_data, session=db.session)\r\n\r\n # get the next metric id and update metric object\r\n try:\r\n db.session.add(new_metric)\r\n db.session.commit()\r\n except SQLAlchemyError as e:\r\n abort(400, message=f'Database error. Reason: {e}')\r\n\r\n # dump to json and return result\r\n result = self.schema.dump(new_metric)\r\n return success(result, code=201)\r\n\r\n\r\nclass QuantModelMetricsResource(MetricsResource):\r\n \"\"\"\r\n This resource handles the HTTP requests coming to the endpoint\r\n \"/quant_model/metrics/{metric_id}\".\r\n\r\n This subclass uses almost everything from the base class, it only needs to specify the\r\n appropriate schemas in the constructor, and to override the build_query method so that\r\n the appropriate metric_type is filtered and the remaining query parameters (specific\r\n to this endpoint) are processed.\r\n\r\n Implemented Query Parameters:\r\n - asset_class: to filter results by a given asset class.\r\n - model_name: to filter results by a given model name.\r\n - pricing_library: to filter results for a given pricing library.\r\n\r\n Note: no trailing slash (\"/\") should be used.\r\n\r\n Accepted HTTP methods: GET, POST\r\n \"\"\"\r\n\r\n def __init__(self, **kwargs):\r\n \"\"\"\r\n Initialize schemas with appropriate classes.\r\n\r\n :param kwargs: pass through to base constructor (service and metric_type)\r\n \"\"\"\r\n schema = QuantModelMetricSchema()\r\n schema_collection = QuantModelMetricSchema(many=True)\r\n super().__init__(schema, schema_collection, **kwargs)\r\n\r\n def build_query(self):\r\n \"\"\"\r\n Override method to include specific query parameters to this model endpoint.\r\n \"\"\"\r\n # build query from base class add required field for joining with parent\r\n query = super().build_query()\r\n query = query.filter(Metric.metric_id == QuantModelMetric.metric_id)\r\n\r\n # get the remaining query parameters\r\n asset_class = request.args.get('asset_class')\r\n model_name = request.args.get('model_name')\r\n pricing_library = request.args.get('pricing_library')\r\n\r\n # process each parameter and, if valid, add as a query condition\r\n if asset_class is not None:\r\n query = query.filter(QuantModelMetric.asset_class == asset_class)\r\n if model_name is not None:\r\n query = query.filter(QuantModelMetric.model_name == model_name)\r\n if pricing_library is not None:\r\n query = query.filter(QuantModelMetric.pricing_library == pricing_library)\r\n return query\r\n\r\n\r\nclass MlModelMetricsResource(MetricsResource):\r\n \"\"\"\r\n This resource handles the HTTP requests coming to the endpoint\r\n \"/ml_model/metrics/{metric_id}\".\r\n\r\n This subclass uses almost everything from the base class, it only needs to specify the\r\n appropriate schemas in the constructor, and to override the build_query method so that\r\n the appropriate metric_type is filtered and the remaining query parameters (specific\r\n to this endpoint) are processed.\r\n\r\n Implemented Query Parameters:\r\n - algorithm: to filter results by a given algorithm.\r\n\r\n Note: no trailing 
slash (\"/\") should be used.\r\n\r\n Accepted HTTP methods: GET, POST\r\n \"\"\"\r\n\r\n def __init__(self, **kwargs):\r\n \"\"\"\r\n Initialize schemas with appropriate classes.\r\n\r\n :param kwargs: pass through to base constructor (service and metric_type)\r\n \"\"\"\r\n schema = MlModelMetricSchema()\r\n schema_collection = MlModelMetricSchema(many=True)\r\n super().__init__(schema, schema_collection, **kwargs)\r\n\r\n def build_query(self):\r\n \"\"\"\r\n Override method to include specific query parameters to this ml_model\r\n endpoint.\r\n \"\"\"\r\n query = super().build_query()\r\n query = query.filter(Metric.metric_id == MlModelMetric.metric_id)\r\n algorithm = request.args.get('algorithm')\r\n if algorithm is not None:\r\n query = query.filter(MlModelMetric.algorithm == algorithm)\r\n return query\r\n\r\n\r\nclass MetricResource(BaseResource):\r\n \"\"\"\r\n This resource handles the HTTP requests coming to the endpoint \"/metrics/{metric_id}\".\r\n\r\n Note: no trailing slash (\"/\") should be used.\r\n\r\n Accepted HTTP methods: GET, PUT, DELETE\r\n \"\"\"\r\n\r\n def get(self, metric_id):\r\n \"\"\"\r\n Implements the GET method for endpoint \"/metrics/{metric_id}\". It should be used\r\n to get a single metric from the database.\r\n\r\n :param metric_id: the metric_id associated with this endpoint\r\n :return: the json object of metric found in the database (if it exists)\r\n \"\"\"\r\n metric = get_metric_by_id(metric_id)\r\n return self.schema.jsonify(metric)\r\n\r\n def put(self, metric_id):\r\n \"\"\"\r\n Implements the PUT method for endpoint \"/metrics/{metric_id}\". It should be used\r\n to update a metric.\r\n\r\n :param metric_id: the metric_id associated with this endpoint\r\n :return: the metric as a json after the update (in case of success)\r\n \"\"\"\r\n json_data = request.get_json(force=True)\r\n if not json_data:\r\n abort(400, message='No input data provided')\r\n\r\n # Validate and deserialize input\r\n metric = get_metric_by_id(metric_id)\r\n self.load(json_data, metric, db.session, partial=True)\r\n\r\n # if it was found and deserialized successfully try to commit\r\n try:\r\n db.session.commit()\r\n except SQLAlchemyError as e:\r\n abort(400, message=f'Database error. Reason: {e}')\r\n\r\n return success(json_data)\r\n\r\n def delete(self, metric_id):\r\n \"\"\"\r\n Implements the DELETE method for endpoint \"/metrics/{metric_id}\". It should be\r\n used to delete a metric result matching the provided metric_id and cob_date.\r\n\r\n :param metric_id: the metric_id associated with this endpoint\r\n :return: the metric as a json after the delete (in case of success)\r\n \"\"\"\r\n metric = get_metric_by_id(metric_id)\r\n # dump as json to send in the end if del is successful\r\n result = self.schema.dump(metric)\r\n\r\n # if result was found, delete it from database\r\n try:\r\n db.session.delete(metric)\r\n db.session.commit()\r\n except SQLAlchemyError as e:\r\n abort(400, message=f'Database error. 
Reason: {e}')\r\n return success(result)\r\n\r\n\r\nclass QuantModelMetricResource(MetricResource):\r\n \"\"\"\r\n This resource handles the HTTP requests coming to the endpoint\r\n \"/quant_model/metrics/{metric_id}\".\r\n\r\n This subclass uses everything from the base class and only needs to specify the\r\n appropriate schemas in the constructor.\r\n\r\n Note: no trailing slash (\"/\") should be used.\r\n\r\n Accepted HTTP methods: GET, PUT, DELETE\r\n \"\"\"\r\n\r\n def __init__(self, **kwargs):\r\n \"\"\"\r\n Initialize schemas with appropriate classes.\r\n\r\n :param kwargs: pass through to base constructor (service and metric_type)\r\n \"\"\"\r\n schema = QuantModelMetricSchema()\r\n schema_collection = QuantModelMetricSchema(many=True)\r\n super().__init__(schema, schema_collection, **kwargs)\r\n\r\n\r\nclass MlModelMetricResource(MetricResource):\r\n \"\"\"\r\n This resource handles the HTTP requests coming to the endpoint\r\n \"/ml_model/metrics/{metric_id}\".\r\n\r\n This subclass uses everything from the base class and only needs to specify the\r\n appropriate schemas in the constructor.\r\n\r\n Note: no trailing slash (\"/\") should be used.\r\n\r\n Accepted HTTP methods: GET, PUT, DELETE\r\n \"\"\"\r\n\r\n def __init__(self, **kwargs):\r\n \"\"\"\r\n Initialize schemas with appropriate classes.\r\n\r\n :param kwargs: pass through to base constructor (service and metric_type)\r\n \"\"\"\r\n schema = MlModelMetricSchema()\r\n schema_collection = MlModelMetricSchema(many=True)\r\n super().__init__(schema, schema_collection, **kwargs)",
"step-ids": [
16,
19,
23,
25,
26
]
}
|
[
16,
19,
23,
25,
26
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def classFactory(iface):
from .tilemapscaleplugin import TileMapScalePlugin
return TileMapScalePlugin(iface)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
/***************************************************************************
TileMapScalePlugin
A QGIS plugin
Let you add tiled datasets (GDAL WMS) and shows them in the correct scale.
-------------------
begin : 2014-03-03
copyright : (C) 2014 by Matthias Ludwig - Datalyze Solutions
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
This script initializes the plugin, making it known to QGIS.
"""
def classFactory(iface):
# load TileMapScalePlugin class from file TileMapScalePlugin
from .tilemapscaleplugin import TileMapScalePlugin
return TileMapScalePlugin(iface)
|
flexible
|
{
"blob_id": "f2e2ebd5b848cf3a01b7304e5e194beb3eec1c10",
"index": 1214,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef classFactory(iface):\n from .tilemapscaleplugin import TileMapScalePlugin\n return TileMapScalePlugin(iface)\n",
"step-3": "# -*- coding: utf-8 -*-\n\"\"\"\n/***************************************************************************\n TileMapScalePlugin\n A QGIS plugin\n Let you add tiled datasets (GDAL WMS) and shows them in the correct scale.\n -------------------\n begin : 2014-03-03\n copyright : (C) 2014 by Matthias Ludwig - Datalyze Solutions\n email : [email protected]\n ***************************************************************************/\n\n/***************************************************************************\n * *\n * This program is free software; you can redistribute it and/or modify *\n * it under the terms of the GNU General Public License as published by *\n * the Free Software Foundation; either version 2 of the License, or *\n * (at your option) any later version. *\n * *\n ***************************************************************************/\n This script initializes the plugin, making it known to QGIS.\n\"\"\"\n\ndef classFactory(iface):\n # load TileMapScalePlugin class from file TileMapScalePlugin\n from .tilemapscaleplugin import TileMapScalePlugin\n return TileMapScalePlugin(iface)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class ClusterTestCase(unittest.TestCase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ClusterTestCase(unittest.TestCase):
def test_cluster(self):
n = 10
experiments, outcomes = utilities.load_flu_data()
data = outcomes['infected fraction R1'][0:n, :]
distances = clusterer.calculate_cid(data)
self.assertEqual(distances.shape, (n, n))
clusterer.plot_dendrogram(distances)
plt.draw()
assignment = clusterer.apply_agglomerative_clustering(distances, 2)
self.assertEqual(assignment.shape, (10,))
distances = clusterer.calculate_cid(data, condensed_form=True)
self.assertEqual(distances.shape, sum(np.arange(0, n)))
clusterer.plot_dendrogram(distances)
plt.draw()
plt.close('all')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ClusterTestCase(unittest.TestCase):
def test_cluster(self):
n = 10
experiments, outcomes = utilities.load_flu_data()
data = outcomes['infected fraction R1'][0:n, :]
distances = clusterer.calculate_cid(data)
self.assertEqual(distances.shape, (n, n))
clusterer.plot_dendrogram(distances)
plt.draw()
assignment = clusterer.apply_agglomerative_clustering(distances, 2)
self.assertEqual(assignment.shape, (10,))
distances = clusterer.calculate_cid(data, condensed_form=True)
self.assertEqual(distances.shape, sum(np.arange(0, n)))
clusterer.plot_dendrogram(distances)
plt.draw()
plt.close('all')
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
import matplotlib.pyplot as plt
import numpy as np
import unittest
from ema_workbench.analysis import clusterer
from test import utilities
class ClusterTestCase(unittest.TestCase):
def test_cluster(self):
n = 10
experiments, outcomes = utilities.load_flu_data()
data = outcomes['infected fraction R1'][0:n, :]
distances = clusterer.calculate_cid(data)
self.assertEqual(distances.shape, (n, n))
clusterer.plot_dendrogram(distances)
plt.draw()
assignment = clusterer.apply_agglomerative_clustering(distances, 2)
self.assertEqual(assignment.shape, (10,))
distances = clusterer.calculate_cid(data, condensed_form=True)
self.assertEqual(distances.shape, sum(np.arange(0, n)))
clusterer.plot_dendrogram(distances)
plt.draw()
plt.close('all')
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
import matplotlib.pyplot as plt
import numpy as np
import unittest
from ema_workbench.analysis import clusterer
from test import utilities
class ClusterTestCase(unittest.TestCase):
def test_cluster(self):
n = 10
experiments, outcomes = utilities.load_flu_data()
data = outcomes["infected fraction R1"][0:n, :]
distances = clusterer.calculate_cid(data)
self.assertEqual(distances.shape, (n, n))
clusterer.plot_dendrogram(distances)
plt.draw()
assignment = clusterer.apply_agglomerative_clustering(distances, 2)
self.assertEqual(assignment.shape, (10,))
distances = clusterer.calculate_cid(data, condensed_form=True)
self.assertEqual(distances.shape, sum(np.arange(0, n)))
clusterer.plot_dendrogram(distances)
plt.draw()
plt.close("all")
if __name__ == "__main__":
unittest.main()
|
flexible
|
{
"blob_id": "a7e2b016131dfdb75e537e86875e1b2f19fb3d9d",
"index": 2580,
"step-1": "<mask token>\n\n\nclass ClusterTestCase(unittest.TestCase):\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ClusterTestCase(unittest.TestCase):\n\n def test_cluster(self):\n n = 10\n experiments, outcomes = utilities.load_flu_data()\n data = outcomes['infected fraction R1'][0:n, :]\n distances = clusterer.calculate_cid(data)\n self.assertEqual(distances.shape, (n, n))\n clusterer.plot_dendrogram(distances)\n plt.draw()\n assignment = clusterer.apply_agglomerative_clustering(distances, 2)\n self.assertEqual(assignment.shape, (10,))\n distances = clusterer.calculate_cid(data, condensed_form=True)\n self.assertEqual(distances.shape, sum(np.arange(0, n)))\n clusterer.plot_dendrogram(distances)\n plt.draw()\n plt.close('all')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ClusterTestCase(unittest.TestCase):\n\n def test_cluster(self):\n n = 10\n experiments, outcomes = utilities.load_flu_data()\n data = outcomes['infected fraction R1'][0:n, :]\n distances = clusterer.calculate_cid(data)\n self.assertEqual(distances.shape, (n, n))\n clusterer.plot_dendrogram(distances)\n plt.draw()\n assignment = clusterer.apply_agglomerative_clustering(distances, 2)\n self.assertEqual(assignment.shape, (10,))\n distances = clusterer.calculate_cid(data, condensed_form=True)\n self.assertEqual(distances.shape, sum(np.arange(0, n)))\n clusterer.plot_dendrogram(distances)\n plt.draw()\n plt.close('all')\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "import matplotlib.pyplot as plt\nimport numpy as np\nimport unittest\nfrom ema_workbench.analysis import clusterer\nfrom test import utilities\n\n\nclass ClusterTestCase(unittest.TestCase):\n\n def test_cluster(self):\n n = 10\n experiments, outcomes = utilities.load_flu_data()\n data = outcomes['infected fraction R1'][0:n, :]\n distances = clusterer.calculate_cid(data)\n self.assertEqual(distances.shape, (n, n))\n clusterer.plot_dendrogram(distances)\n plt.draw()\n assignment = clusterer.apply_agglomerative_clustering(distances, 2)\n self.assertEqual(assignment.shape, (10,))\n distances = clusterer.calculate_cid(data, condensed_form=True)\n self.assertEqual(distances.shape, sum(np.arange(0, n)))\n clusterer.plot_dendrogram(distances)\n plt.draw()\n plt.close('all')\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "import matplotlib.pyplot as plt\nimport numpy as np\nimport unittest\n\nfrom ema_workbench.analysis import clusterer\nfrom test import utilities\n\n\nclass ClusterTestCase(unittest.TestCase):\n def test_cluster(self):\n n = 10\n experiments, outcomes = utilities.load_flu_data()\n data = outcomes[\"infected fraction R1\"][0:n, :]\n\n distances = clusterer.calculate_cid(data)\n self.assertEqual(distances.shape, (n, n))\n clusterer.plot_dendrogram(distances)\n plt.draw()\n\n assignment = clusterer.apply_agglomerative_clustering(distances, 2)\n self.assertEqual(assignment.shape, (10,))\n\n distances = clusterer.calculate_cid(data, condensed_form=True)\n self.assertEqual(distances.shape, sum(np.arange(0, n)))\n clusterer.plot_dendrogram(distances)\n plt.draw()\n\n plt.close(\"all\")\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def print_usage():
sys.stderr.write(
"""
Find the length of the biggest line in the file.
Usage: ./biggestLine <delimiter> <field number - first element is 0> <file path>
"""
)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def print_usage():
sys.stderr.write(
"""
Find the length of the biggest line in the file.
Usage: ./biggestLine <delimiter> <field number - first element is 0> <file path>
"""
)
def main():
if len(sys.argv) != 4:
print_usage()
sys.exit(1)
delimiter = sys.argv[1]
field_number = int(sys.argv[2])
file_path = sys.argv[3]
my_file = Path(file_path)
biggest_string = ''
try:
with open(my_file, 'r') as f:
line = f.readline()
line_num = 0
while line:
line_num = line_num + 1
line = f.readline()
curr = line.split(delimiter)[field_number]
if len(curr) > len(biggest_string):
biggest_string = curr
print('Processing Line ' + str(line_num), end='\r')
except IndexError:
print('\nError on line ' + str(line_num))
except KeyboardInterrupt:
sys.exit(0)
except FileNotFoundError:
sys.stderr.write('file not found')
sys.exit(1)
print('biggest string is ' + str(len(biggest_string)) + ' characters')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def print_usage():
sys.stderr.write(
"""
Find the length of the biggest line in the file.
Usage: ./biggestLine <delimiter> <field number - first element is 0> <file path>
"""
)
def main():
if len(sys.argv) != 4:
print_usage()
sys.exit(1)
delimiter = sys.argv[1]
field_number = int(sys.argv[2])
file_path = sys.argv[3]
my_file = Path(file_path)
biggest_string = ''
try:
with open(my_file, 'r') as f:
line = f.readline()
line_num = 0
while line:
line_num = line_num + 1
line = f.readline()
curr = line.split(delimiter)[field_number]
if len(curr) > len(biggest_string):
biggest_string = curr
print('Processing Line ' + str(line_num), end='\r')
except IndexError:
print('\nError on line ' + str(line_num))
except KeyboardInterrupt:
sys.exit(0)
except FileNotFoundError:
sys.stderr.write('file not found')
sys.exit(1)
print('biggest string is ' + str(len(biggest_string)) + ' characters')
main()
<|reserved_special_token_1|>
import sys
from pathlib import Path
def print_usage():
sys.stderr.write(
"""
Find the length of the biggest line in the file.
Usage: ./biggestLine <delimiter> <field number - first element is 0> <file path>
"""
)
def main():
if len(sys.argv) != 4:
print_usage()
sys.exit(1)
delimiter = sys.argv[1]
field_number = int(sys.argv[2])
file_path = sys.argv[3]
my_file = Path(file_path)
biggest_string = ''
try:
with open(my_file, 'r') as f:
line = f.readline()
line_num = 0
while line:
line_num = line_num + 1
line = f.readline()
curr = line.split(delimiter)[field_number]
if len(curr) > len(biggest_string):
biggest_string = curr
print('Processing Line ' + str(line_num), end='\r')
except IndexError:
print('\nError on line ' + str(line_num))
except KeyboardInterrupt:
sys.exit(0)
except FileNotFoundError:
sys.stderr.write('file not found')
sys.exit(1)
print('biggest string is ' + str(len(biggest_string)) + ' characters')
main()
<|reserved_special_token_1|>
#!/usr/bin/env python3
import sys
from pathlib import Path
def print_usage():
sys.stderr.write('''
Find the length of the biggest line in the file.
Usage: ./biggestLine <delimiter> <field number - first element is 0> <file path>
''')
def main():
if len(sys.argv) != 4:
print_usage()
sys.exit(1)
delimiter = sys.argv[1]
field_number = int(sys.argv[2])
file_path = sys.argv[3]
my_file = Path(file_path)
biggest_string = ""
try:
with open(my_file, 'r') as f:
line = f.readline()
line_num = 0
while line:
line_num = line_num + 1
line = f.readline()
curr = line.split(delimiter)[field_number]
if len(curr) > len(biggest_string):
biggest_string = curr
print('Processing Line ' + str(line_num), end='\r')
except IndexError:
print('\nError on line '+str(line_num))
except KeyboardInterrupt:
sys.exit(0)
except FileNotFoundError:
sys.stderr.write('file not found')
sys.exit(1)
print("biggest string is " + str(len(biggest_string)) + " characters")
main()
|
flexible
|
{
"blob_id": "c84175edb88f5b9219c22ec717ec30bb530982a2",
"index": 2861,
"step-1": "<mask token>\n\n\ndef print_usage():\n sys.stderr.write(\n \"\"\"\nFind the length of the biggest line in the file.\nUsage: ./biggestLine <delimiter> <field number - first element is 0> <file path>\n \"\"\"\n )\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef print_usage():\n sys.stderr.write(\n \"\"\"\nFind the length of the biggest line in the file.\nUsage: ./biggestLine <delimiter> <field number - first element is 0> <file path>\n \"\"\"\n )\n\n\ndef main():\n if len(sys.argv) != 4:\n print_usage()\n sys.exit(1)\n delimiter = sys.argv[1]\n field_number = int(sys.argv[2])\n file_path = sys.argv[3]\n my_file = Path(file_path)\n biggest_string = ''\n try:\n with open(my_file, 'r') as f:\n line = f.readline()\n line_num = 0\n while line:\n line_num = line_num + 1\n line = f.readline()\n curr = line.split(delimiter)[field_number]\n if len(curr) > len(biggest_string):\n biggest_string = curr\n print('Processing Line ' + str(line_num), end='\\r')\n except IndexError:\n print('\\nError on line ' + str(line_num))\n except KeyboardInterrupt:\n sys.exit(0)\n except FileNotFoundError:\n sys.stderr.write('file not found')\n sys.exit(1)\n print('biggest string is ' + str(len(biggest_string)) + ' characters')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef print_usage():\n sys.stderr.write(\n \"\"\"\nFind the length of the biggest line in the file.\nUsage: ./biggestLine <delimiter> <field number - first element is 0> <file path>\n \"\"\"\n )\n\n\ndef main():\n if len(sys.argv) != 4:\n print_usage()\n sys.exit(1)\n delimiter = sys.argv[1]\n field_number = int(sys.argv[2])\n file_path = sys.argv[3]\n my_file = Path(file_path)\n biggest_string = ''\n try:\n with open(my_file, 'r') as f:\n line = f.readline()\n line_num = 0\n while line:\n line_num = line_num + 1\n line = f.readline()\n curr = line.split(delimiter)[field_number]\n if len(curr) > len(biggest_string):\n biggest_string = curr\n print('Processing Line ' + str(line_num), end='\\r')\n except IndexError:\n print('\\nError on line ' + str(line_num))\n except KeyboardInterrupt:\n sys.exit(0)\n except FileNotFoundError:\n sys.stderr.write('file not found')\n sys.exit(1)\n print('biggest string is ' + str(len(biggest_string)) + ' characters')\n\n\nmain()\n",
"step-4": "import sys\nfrom pathlib import Path\n\n\ndef print_usage():\n sys.stderr.write(\n \"\"\"\nFind the length of the biggest line in the file.\nUsage: ./biggestLine <delimiter> <field number - first element is 0> <file path>\n \"\"\"\n )\n\n\ndef main():\n if len(sys.argv) != 4:\n print_usage()\n sys.exit(1)\n delimiter = sys.argv[1]\n field_number = int(sys.argv[2])\n file_path = sys.argv[3]\n my_file = Path(file_path)\n biggest_string = ''\n try:\n with open(my_file, 'r') as f:\n line = f.readline()\n line_num = 0\n while line:\n line_num = line_num + 1\n line = f.readline()\n curr = line.split(delimiter)[field_number]\n if len(curr) > len(biggest_string):\n biggest_string = curr\n print('Processing Line ' + str(line_num), end='\\r')\n except IndexError:\n print('\\nError on line ' + str(line_num))\n except KeyboardInterrupt:\n sys.exit(0)\n except FileNotFoundError:\n sys.stderr.write('file not found')\n sys.exit(1)\n print('biggest string is ' + str(len(biggest_string)) + ' characters')\n\n\nmain()\n",
"step-5": "#!/usr/bin/env python3\nimport sys\nfrom pathlib import Path\n\n\ndef print_usage():\n sys.stderr.write('''\nFind the length of the biggest line in the file.\nUsage: ./biggestLine <delimiter> <field number - first element is 0> <file path>\n ''')\n\n\ndef main():\n if len(sys.argv) != 4:\n print_usage()\n sys.exit(1)\n\n delimiter = sys.argv[1]\n field_number = int(sys.argv[2])\n file_path = sys.argv[3]\n\n my_file = Path(file_path)\n\n biggest_string = \"\"\n try:\n with open(my_file, 'r') as f:\n line = f.readline()\n line_num = 0\n while line:\n line_num = line_num + 1\n line = f.readline()\n curr = line.split(delimiter)[field_number]\n if len(curr) > len(biggest_string):\n biggest_string = curr\n print('Processing Line ' + str(line_num), end='\\r')\n except IndexError:\n print('\\nError on line '+str(line_num))\n except KeyboardInterrupt:\n sys.exit(0)\n except FileNotFoundError:\n sys.stderr.write('file not found')\n sys.exit(1)\n\n print(\"biggest string is \" + str(len(biggest_string)) + \" characters\")\n\n\nmain()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from nose.tools import assert_equal
def rec_coin(target, coins):
'''
INPUT: Target change amount and list of coin values
OUTPUT: Minimum coins needed to make change
Note, this solution is not optimized.
'''
# Default to target value
min_coins = target
# Check to see if we have a single coin match (BASE CASE)
if target in coins:
return 1
else:
# for every coin value that is <= than target
for i in [c for c in coins if c <= target]:
# Recursive Call (add a count coin and subtract from the target)
num_coins = 1 + rec_coin(target-i, coins)
# Reset Minimum if we have a new minimum
if num_coins < min_coins:
min_coins = num_coins
return min_coins
# consider using decorators to encapsulate memoization
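# For example, functools.lru_cache can wrap the plain recursion with memoization.
# This is only an illustrative sketch (not part of the original solution): coins must be
# hashable (e.g. a tuple), and it assumes change can always be made (e.g. a 1-unit coin exists).
#
# from functools import lru_cache
#
# @lru_cache(maxsize=None)
# def rec_coin_memo(target, coins):
#     if target in coins:
#         return 1
#     return 1 + min(rec_coin_memo(target - c, coins) for c in coins if c <= target)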
def rec_coin_dynam(target, coins, known_results):
'''
INPUT: This function takes in a target amount and a list of possible coins to use.
It also takes a third parameter, known_results, indicating previously calculated results.
    The known_results parameter should be initialized with [0] * (target+1)
OUTPUT: Minimum number of coins needed to make the target.
'''
# Default output to target
min_coins = target
# Base Case
if target in coins:
known_results[target] = 1
return 1
# Return a known result if it happens to be greater than 0
elif known_results[target] > 0:
return known_results[target]
else:
# for every coin value that is <= than target
for i in [c for c in coins if c <= target]:
# Recursive call, note how we include the known results!
num_coins = 1 + rec_coin_dynam(target-i, coins, known_results)
# Reset Minimum if we have a new minimum
if num_coins < min_coins:
min_coins = num_coins
# Reset the known result
known_results[target] = min_coins
return min_coins
def bottom_up_solution(n, coins):
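    '''
    Bottom-up (iterative) dynamic programming variant: arr[i] stores the minimum
    number of coins needed to make change for amount i, filled from 0 up to n.
    '''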
    # initialize the array
arr = [0] + [n]*(n)
for i in range(1, len(arr)):
min_coins = n
for coin in [c for c in coins if c <= i]:
min_coins = min(arr[i-coin] + 1, min_coins)
arr[i] = min_coins
return arr[n]
class TestCoins(object):
def check(self, solution):
coins = [1, 5, 10, 25]
assert_equal(solution(45, coins, [0]*(45+1)), 3)
assert_equal(solution(23, coins, [0]*(23+1)), 5)
assert_equal(solution(74, coins, [0]*(74+1)), 8)
print('Passed all tests.')
# Run Test
# test = TestCoins()
# test.check(rec_coin_dynam)
# print(bottom_up_solution(6, [1, 2, 5]))
# dynamic solution
target = 23
coins = [1, 2, 5, 10, 20]
known_results = [0]*(target+1)
print(rec_coin_dynam(target, coins, known_results))
|
normal
|
{
"blob_id": "f8c30f8ccd1b901fd750a2c9e14cab78e1d12a14",
"index": 4039,
"step-1": "<mask token>\n\n\ndef rec_coin(target, coins):\n \"\"\"\n INPUT: Target change amount and list of coin values\n OUTPUT: Minimum coins needed to make change\n\n Note, this solution is not optimized.\n \"\"\"\n min_coins = target\n if target in coins:\n return 1\n else:\n for i in [c for c in coins if c <= target]:\n num_coins = 1 + rec_coin(target - i, coins)\n if num_coins < min_coins:\n min_coins = num_coins\n return min_coins\n\n\ndef rec_coin_dynam(target, coins, known_results):\n \"\"\"\n INPUT: This function takes in a target amount and a list of possible coins to use.\n It also takes a third parameter, known_results, indicating previously calculated results.\n The known_results parameter shoud be started with [0] * (target+1)\n\n OUTPUT: Minimum number of coins needed to make the target.\n \"\"\"\n min_coins = target\n if target in coins:\n known_results[target] = 1\n return 1\n elif known_results[target] > 0:\n return known_results[target]\n else:\n for i in [c for c in coins if c <= target]:\n num_coins = 1 + rec_coin_dynam(target - i, coins, known_results)\n if num_coins < min_coins:\n min_coins = num_coins\n known_results[target] = min_coins\n return min_coins\n\n\n<mask token>\n\n\nclass TestCoins(object):\n\n def check(self, solution):\n coins = [1, 5, 10, 25]\n assert_equal(solution(45, coins, [0] * (45 + 1)), 3)\n assert_equal(solution(23, coins, [0] * (23 + 1)), 5)\n assert_equal(solution(74, coins, [0] * (74 + 1)), 8)\n print('Passed all tests.')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef rec_coin(target, coins):\n \"\"\"\n INPUT: Target change amount and list of coin values\n OUTPUT: Minimum coins needed to make change\n\n Note, this solution is not optimized.\n \"\"\"\n min_coins = target\n if target in coins:\n return 1\n else:\n for i in [c for c in coins if c <= target]:\n num_coins = 1 + rec_coin(target - i, coins)\n if num_coins < min_coins:\n min_coins = num_coins\n return min_coins\n\n\ndef rec_coin_dynam(target, coins, known_results):\n \"\"\"\n INPUT: This function takes in a target amount and a list of possible coins to use.\n It also takes a third parameter, known_results, indicating previously calculated results.\n The known_results parameter shoud be started with [0] * (target+1)\n\n OUTPUT: Minimum number of coins needed to make the target.\n \"\"\"\n min_coins = target\n if target in coins:\n known_results[target] = 1\n return 1\n elif known_results[target] > 0:\n return known_results[target]\n else:\n for i in [c for c in coins if c <= target]:\n num_coins = 1 + rec_coin_dynam(target - i, coins, known_results)\n if num_coins < min_coins:\n min_coins = num_coins\n known_results[target] = min_coins\n return min_coins\n\n\ndef bottom_up_solution(n, coins):\n arr = [0] + [n] * n\n for i in range(1, len(arr)):\n min_coins = n\n for coin in [c for c in coins if c <= i]:\n min_coins = min(arr[i - coin] + 1, min_coins)\n arr[i] = min_coins\n return arr[n]\n\n\nclass TestCoins(object):\n\n def check(self, solution):\n coins = [1, 5, 10, 25]\n assert_equal(solution(45, coins, [0] * (45 + 1)), 3)\n assert_equal(solution(23, coins, [0] * (23 + 1)), 5)\n assert_equal(solution(74, coins, [0] * (74 + 1)), 8)\n print('Passed all tests.')\n\n\n<mask token>\nprint(rec_coin_dynam(target, coins, known_results))\n",
"step-3": "<mask token>\n\n\ndef rec_coin(target, coins):\n \"\"\"\n INPUT: Target change amount and list of coin values\n OUTPUT: Minimum coins needed to make change\n\n Note, this solution is not optimized.\n \"\"\"\n min_coins = target\n if target in coins:\n return 1\n else:\n for i in [c for c in coins if c <= target]:\n num_coins = 1 + rec_coin(target - i, coins)\n if num_coins < min_coins:\n min_coins = num_coins\n return min_coins\n\n\ndef rec_coin_dynam(target, coins, known_results):\n \"\"\"\n INPUT: This function takes in a target amount and a list of possible coins to use.\n It also takes a third parameter, known_results, indicating previously calculated results.\n The known_results parameter shoud be started with [0] * (target+1)\n\n OUTPUT: Minimum number of coins needed to make the target.\n \"\"\"\n min_coins = target\n if target in coins:\n known_results[target] = 1\n return 1\n elif known_results[target] > 0:\n return known_results[target]\n else:\n for i in [c for c in coins if c <= target]:\n num_coins = 1 + rec_coin_dynam(target - i, coins, known_results)\n if num_coins < min_coins:\n min_coins = num_coins\n known_results[target] = min_coins\n return min_coins\n\n\ndef bottom_up_solution(n, coins):\n arr = [0] + [n] * n\n for i in range(1, len(arr)):\n min_coins = n\n for coin in [c for c in coins if c <= i]:\n min_coins = min(arr[i - coin] + 1, min_coins)\n arr[i] = min_coins\n return arr[n]\n\n\nclass TestCoins(object):\n\n def check(self, solution):\n coins = [1, 5, 10, 25]\n assert_equal(solution(45, coins, [0] * (45 + 1)), 3)\n assert_equal(solution(23, coins, [0] * (23 + 1)), 5)\n assert_equal(solution(74, coins, [0] * (74 + 1)), 8)\n print('Passed all tests.')\n\n\ntarget = 23\ncoins = [1, 2, 5, 10, 20]\nknown_results = [0] * (target + 1)\nprint(rec_coin_dynam(target, coins, known_results))\n",
"step-4": "from nose.tools import assert_equal\n\n\ndef rec_coin(target, coins):\n \"\"\"\n INPUT: Target change amount and list of coin values\n OUTPUT: Minimum coins needed to make change\n\n Note, this solution is not optimized.\n \"\"\"\n min_coins = target\n if target in coins:\n return 1\n else:\n for i in [c for c in coins if c <= target]:\n num_coins = 1 + rec_coin(target - i, coins)\n if num_coins < min_coins:\n min_coins = num_coins\n return min_coins\n\n\ndef rec_coin_dynam(target, coins, known_results):\n \"\"\"\n INPUT: This function takes in a target amount and a list of possible coins to use.\n It also takes a third parameter, known_results, indicating previously calculated results.\n The known_results parameter shoud be started with [0] * (target+1)\n\n OUTPUT: Minimum number of coins needed to make the target.\n \"\"\"\n min_coins = target\n if target in coins:\n known_results[target] = 1\n return 1\n elif known_results[target] > 0:\n return known_results[target]\n else:\n for i in [c for c in coins if c <= target]:\n num_coins = 1 + rec_coin_dynam(target - i, coins, known_results)\n if num_coins < min_coins:\n min_coins = num_coins\n known_results[target] = min_coins\n return min_coins\n\n\ndef bottom_up_solution(n, coins):\n arr = [0] + [n] * n\n for i in range(1, len(arr)):\n min_coins = n\n for coin in [c for c in coins if c <= i]:\n min_coins = min(arr[i - coin] + 1, min_coins)\n arr[i] = min_coins\n return arr[n]\n\n\nclass TestCoins(object):\n\n def check(self, solution):\n coins = [1, 5, 10, 25]\n assert_equal(solution(45, coins, [0] * (45 + 1)), 3)\n assert_equal(solution(23, coins, [0] * (23 + 1)), 5)\n assert_equal(solution(74, coins, [0] * (74 + 1)), 8)\n print('Passed all tests.')\n\n\ntarget = 23\ncoins = [1, 2, 5, 10, 20]\nknown_results = [0] * (target + 1)\nprint(rec_coin_dynam(target, coins, known_results))\n",
"step-5": "from nose.tools import assert_equal\n\n\ndef rec_coin(target, coins):\n '''\n INPUT: Target change amount and list of coin values\n OUTPUT: Minimum coins needed to make change\n\n Note, this solution is not optimized.\n '''\n\n # Default to target value\n min_coins = target\n\n # Check to see if we have a single coin match (BASE CASE)\n if target in coins:\n return 1\n\n else:\n\n # for every coin value that is <= than target\n for i in [c for c in coins if c <= target]:\n\n # Recursive Call (add a count coin and subtract from the target)\n num_coins = 1 + rec_coin(target-i, coins)\n\n # Reset Minimum if we have a new minimum\n if num_coins < min_coins:\n\n min_coins = num_coins\n\n return min_coins\n\n\n# consider using decorators to encapsulate memoization\n\ndef rec_coin_dynam(target, coins, known_results):\n '''\n INPUT: This function takes in a target amount and a list of possible coins to use.\n It also takes a third parameter, known_results, indicating previously calculated results.\n The known_results parameter shoud be started with [0] * (target+1)\n\n OUTPUT: Minimum number of coins needed to make the target.\n '''\n\n # Default output to target\n min_coins = target\n\n # Base Case\n if target in coins:\n known_results[target] = 1\n return 1\n\n # Return a known result if it happens to be greater than 0\n elif known_results[target] > 0:\n return known_results[target]\n\n else:\n # for every coin value that is <= than target\n for i in [c for c in coins if c <= target]:\n\n # Recursive call, note how we include the known results!\n num_coins = 1 + rec_coin_dynam(target-i, coins, known_results)\n\n # Reset Minimum if we have a new minimum\n if num_coins < min_coins:\n min_coins = num_coins\n\n # Reset the known result\n known_results[target] = min_coins\n\n return min_coins\n\n\ndef bottom_up_solution(n, coins):\n\n # intialize the array\n arr = [0] + [n]*(n)\n\n for i in range(1, len(arr)):\n min_coins = n\n for coin in [c for c in coins if c <= i]:\n min_coins = min(arr[i-coin] + 1, min_coins)\n\n arr[i] = min_coins\n\n return arr[n]\n\n\nclass TestCoins(object):\n\n def check(self, solution):\n coins = [1, 5, 10, 25]\n assert_equal(solution(45, coins, [0]*(45+1)), 3)\n assert_equal(solution(23, coins, [0]*(23+1)), 5)\n assert_equal(solution(74, coins, [0]*(74+1)), 8)\n\n print('Passed all tests.')\n\n\n# Run Test\n# test = TestCoins()\n# test.check(rec_coin_dynam)\n\n# print(bottom_up_solution(6, [1, 2, 5]))\n\n\n# dynamic solution\ntarget = 23\ncoins = [1, 2, 5, 10, 20]\nknown_results = [0]*(target+1)\n\nprint(rec_coin_dynam(target, coins, known_results))\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
# Sieve of Eratosthenes: start with every integer from 2 to N,
# then strike out the multiples of each number that survives.
N = int(input("Max value N? "))
s = set()
for i in range(2, N + 1):
    s.add(i)
for num in sorted(s):
    k = num + num
    while k <= N:
        if k in s:
            s.remove(k)
        k += num
print("Primes:", end=" ")
for num in sorted(s):
    print(num, end=" ")
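# Worked example: with N = 20 the sieve above leaves {2, 3, 5, 7, 11, 13, 17, 19} in s,
# so the script prints: Primes: 2 3 5 7 11 13 17 19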
|
normal
|
{
"blob_id": "bf5422792533f85967a5573d9e6f370a7967a914",
"index": 120,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(2, N + 1):\n s.add(i)\nfor num in sorted(s):\n k = num + num\n while k <= N:\n if k in s:\n s.remove(k)\n k += num\nprint('Primes:', end=' ')\nfor num in sorted(s):\n print(num, end=' ')\n",
"step-3": "N = int(input('Max value N? '))\ns = set()\nfor i in range(2, N + 1):\n s.add(i)\nfor num in sorted(s):\n k = num + num\n while k <= N:\n if k in s:\n s.remove(k)\n k += num\nprint('Primes:', end=' ')\nfor num in sorted(s):\n print(num, end=' ')\n",
"step-4": "N = int(input(\"Max value N? \"))\ns = set()\nfor i in range(2, N + 1):\n s.add(i)\nfor num in sorted(s):\n k = num + num\n while k <= N:\n if k in s:\n s.remove(k)\n k += num\nprint(\"Primes:\", end = \" \")\nfor num in sorted(s):\n print(num, end = \" \")\n\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
"""
These are data input download and prep scripts. They download and massage the data for the UBM calculations (calc.py)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import time
try:
    # For Python 3.0 and later
    from urllib.request import urlopen, urlretrieve
except ImportError:
    # Fall back to Python 2's urllib2 and urllib
    from urllib2 import urlopen
    from urllib import urlretrieve
import re
import glob
import os
import arcpy
from arcpy.sa import *
def get_modis(tiles, save_path, months='', years=''):
"""The following script automatically retrieves monthly MODIS16 hdf file from the ntsg website.
:param tiles: Tile number in format h##v##; based on grid from https://modis-land.gsfc.nasa.gov/MODLAND_grid.html
    :param save_path: directory where the downloaded hdf files are saved
:param months: months of interest; defaults to [1,12]
:param years: years of interest; defaults to [2000,2015]
:return: saves files in outpath
"""
from bs4 import BeautifulSoup
if months == '':
months = [1, 12]
if years == '':
years = [2000, 2015]
mons = [str(i).zfill(2) for i in range(months[0], months[1] + 1)]
yrs = [str(i) for i in range(years[0], years[1] + 1)]
for tile in tiles:
for yr in yrs:
for m in mons:
base_url = "http://files.ntsg.umt.edu/data/NTSG_Products/MOD16/MOD16A2_MONTHLY.MERRA_GMAO_1kmALB/"
dir_path = "Y{:}/M{:}/".format(yr, m)
url = base_url + dir_path
                soup = BeautifulSoup(urlopen(url), "lxml")
hdf_name = soup.find_all('', {
'href': re.compile('MOD16A2.A{:}M{:}.{:}.105'.format(yr, m, tile), re.IGNORECASE)})
                files = urlretrieve(url + hdf_name[0].text, save_path + hdf_name[0].text)
print(save_path + hdf_name[0].text)
time.sleep(0.5)
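# Example usage (a hedged sketch; the tile IDs, save folder, and date ranges below are
# hypothetical and should be replaced with your own):
# get_modis(['h09v04', 'h09v05'], 'H:/GIS/MODIS/raw/', months=[1, 12], years=[2000, 2014])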
def get_file_list(save_path, wld='*.105*.hdf'):
"""
Args:
save_path: path to folder where raw MODIS files are
wld: common wildcard in all of the raw MODIS files
Returns:
list of files to analyze in the raw folder
"""
return glob.glob(os.path.join(save_path, wld))
def reproject_modis(files, save_path, data_type, eight_day=True, proj=102003):
"""Iterates through MODIS files in a folder reprojecting them.
Takes the crazy MODIS sinusoidal projection to a user defined projection.
Args:
files: list of file paths of MODIS hdf files; created using files = glob.glob(os.path.join(save_path, '*.105*.hdf'))
save_path: folder to store the reprojected files
data_type: type of MODIS16 data being reprojected; options are 'ET','PET','LE', and 'PLE'
eight_day: time span of modis file; Bool where default is true (input 8-day rasters)
        proj: projection of output data by epsg number; default is Albers Equal Area (102003)
Returns:
Reprojected MODIS files
..notes:
The EPSG code for NAD83 Zone 12 is 26912.
The EPSG code for Albers Equal Area is 102003
http://files.ntsg.umt.edu/data/NTSG_Products/MOD16/MOD16_global_evapotranspiration_description.pdf
https://modis-land.gsfc.nasa.gov/MODLAND_grid.html
        https://lpdaac.usgs.gov/dataset_discovery/modis/modis_products_table/mod16a2_v006
https://search.earthdata.nasa.gov/search/granules?p=C1000000524-LPDAAC_ECS&m=36.87890625!-114.50390625!5!1!0!0%2C2&tl=1503517150!4!!&q=MOD16A2+V006&sb=-114.29296875%2C36.80859375%2C-109.96875%2C42.2578125
"""
import pymodis
# dictionary to designate a directory
datadir = {'ET': '/ET/', 'PET': '/PET/', 'LE': '/LE/', 'PLE': '/PLE/'}
# dictionary to select layer from hdf file that contains the datatype
matrdir = {'ET': [1, 0, 0, 0], 'LE': [0, 1, 0, 0], 'PET': [0, 0, 1, 0], 'PLE': [0, 0, 0, 1]}
# check for file folder and make it if it doesn't exist
if not os.path.exists(save_path + datadir[data_type]):
os.makedirs(save_path + datadir[data_type])
print('created {:}'.format(save_path + datadir[data_type]))
for f in files:
year = f.split('\\')[1].split('.')[1][1:5]
v = f.split('\\')[1].split('.')[2][-2:] # parse v (cell coordinate) from hdf filename
h = f.split('\\')[1].split('.')[2][1:3] # parse h (cell coordinate) from hdf filename
# names file based on time span of input rasters; 8-day by default
if eight_day:
doy = f.split('\\')[1].split('.')[1][-3:] # parse day of year from hdf filename
fname = 'A' + year + 'D' + doy + 'h' + h + 'v' + v
pref = os.path.join(save_path + datadir[data_type] + fname)
else:
month = f.split('\\')[1].split('.')[1][-2:] # parse month from hdf filename
fname = 'A' + year + 'M' + month + 'h' + h + 'v' + v
pref = os.path.join(save_path + datadir[data_type] + fname)
convertsingle = pymodis.convertmodis_gdal.convertModisGDAL(hdfname=f, prefix=pref,
subset=matrdir[data_type],
res=1000, epsg=proj)
# [ET,LE,PET,PLE]
try:
convertsingle.run()
except:
print(fname + ' failed!')
pass
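# Example usage (a hedged sketch; folders are hypothetical). get_file_list() collects the raw
# hdf tiles and reproject_modis() writes reprojected rasters for the chosen data type:
# files = get_file_list('H:/GIS/MODIS/raw/')
# reproject_modis(files, 'H:/GIS/MODIS', 'ET', eight_day=False, proj=26912)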
def clip_and_fix(path, outpath, data_type, area=''):
"""Clips raster to Utah's Watersheds and makes exception values null.
Args:
path: folder of the reprojected MODIS files
outpath: ESRI gdb to store the clipped files
data_type: type of MODIS16 data being reprojected; options are 'ET','PET','LE', and 'PLE'
area: path to polygon used to clip tiles
"""
# Check out the ArcGIS Spatial Analyst extension license
arcpy.CheckOutExtension("Spatial")
arcpy.env.workspace = path
arcpy.env.overwriteOutput = True
if area == '':
area = 'H:/GIS/Calc.gdb/WBD_UT'
arcpy.env.mask = area
arcpy.CheckOutExtension("spatial")
for rast in arcpy.ListRasters():
calc = SetNull(arcpy.Raster(rast) > 32700, arcpy.Raster(rast))
calc.save(outpath + data_type + rast[1:5] + rast[6:8] + 'h' + rast[10:11] + 'v' + rast[13:14])
print(outpath + data_type + rast[1:5] + rast[6:8] + 'h' + rast[10:11] + 'v' + rast[13:14])
def merge_rasts(path, data_type='ET', monthRange='', yearRange='', outpath=''):
"""Mosaics (merges) different MODIS cells into one layer.
"""
if monthRange == '':
monthRange = [1, 12]
if yearRange == '':
yearRange = [2000, 2015]
if outpath == '':
outpath = path
arcpy.env.workspace = path
outCS = arcpy.SpatialReference('NAD 1983 UTM Zone 12N')
for y in range(yearRange[0], yearRange[-1] + 1): # set years converted here
for m in range(monthRange[0], monthRange[-1] + 1): # set months converted here
nm = data_type + str(y) + str(m).zfill(2)
rlist = []
for rast in arcpy.ListRasters(nm + '*'):
rlist.append(rast)
try:
arcpy.MosaicToNewRaster_management(rlist, outpath, nm + 'c', outCS, \
"16_BIT_UNSIGNED", "1000", "1", "LAST", "LAST")
print(path + nm + 'c')
except:
print(nm + ' failed!')
pass
def scale_modis(path, out_path, scaleby=10000.0, data_type='ET', monthRange=[1, 12], yearRange=[2000, 2014]):
"""
:param path: directory to unconverted modis tiles
:param out_path: directory to put output in
:param scaleby: scaling factor for MODIS data; default converts to meters/month
:param data_type: type of MODIS16 data being scaled; used for file name; options are 'ET','PET','LE', and 'PLE'
:param monthRange: range of months to process data
:param yearRange: range of years to process data
:return:
"""
arcpy.CheckOutExtension("spatial")
for y in range(yearRange[0], yearRange[-1] + 1): # set years converted here
for m in range(monthRange[0], monthRange[-1] + 1): # set months converted here
nm = data_type + str(y) + str(m).zfill(2)
calc = Divide(nm + 'c', scaleby)
calc.save(out_path + nm)
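# Example of the clip -> merge -> scale sequence (a hedged sketch; the folder and gdb paths
# are hypothetical):
# clip_and_fix('H:/GIS/MODIS/ET/', 'H:/GIS/MODIS.gdb/', 'ET')
# merge_rasts('H:/GIS/MODIS.gdb/', data_type='ET', monthRange=[1, 12], yearRange=[2000, 2014])
# scale_modis('H:/GIS/MODIS.gdb/', 'H:/GIS/Calc.gdb/', scaleby=10000.0, data_type='ET')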
def untar(filepath, outfoldername='.', compression='r', deletesource=False):
"""
Given an input tar archive filepath, extracts the files.
Required: filepath -- the path to the tar archive
Optional: outfoldername -- the output directory for the files; DEFAULT is directory with tar archive
compression -- the type of compression used in the archive; DEFAULT is 'r'; use "r:gz" for gzipped archives
deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is false
Output: filelist -- the list of all extract files
"""
import tarfile
with tarfile.open(filepath, compression) as tfile:
filelist = tfile.getnames()
tfile.extractall(path=outfoldername)
if deletesource:
try:
os.remove(filepath)
except:
raise Exception("Could not delete tar archive {0}.".format(filepath))
return filelist
def ungz(filepath, compression='rb', deletesource=False):
"""
Given an input gz archive filepath, extracts the files.
Required: filepath -- the path to the tar archive
Optional: outfoldername -- the output directory for the files; DEFAULT is directory with tar archive
compression -- the type of compression used in the archive; DEFAULT is 'r'; use "r:gz" for gzipped archives
deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is false
Output: filelist -- the list of all extract files
"""
import gzip
with gzip.open(filepath, compression) as f:
outF = open(filepath[:-3], 'wb')
outF.write(f.read())
f.close()
outF.close()
if deletesource:
try:
os.remove(filepath)
except:
raise Exception("Could not delete gz archive {0}.".format(filepath))
return filepath[:-3]
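# Example usage (a hedged sketch; archive locations are hypothetical). Both helpers return the
# extracted file name(s), so they pair naturally with get_file_list():
# for arch in get_file_list('H:/GIS/SNODAS/raw/', wld='*.tar'):
#     untar(arch, outfoldername='H:/GIS/SNODAS/SNWDS/')
# for arch in get_file_list('H:/GIS/SNODAS/SNWDS/', wld='*.gz'):
#     ungz(arch, deletesource=True)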
def replace_hdr_file(hdrfile):
"""
Replace the .hdr file for a .bil raster with the correct data for Arc processing
Required: hdrfile -- filepath for .hdr file to replace/create
Output: None
"""
# hdr file replacment string
HDRFILE_STRING = "byteorder M\nlayout bil\nnbands 1\nnbits 16\nncols 6935\nnrows 3351\n\
ulxmap -124.729583333331703\nulymap 52.871249516804028\nxdim 0.00833333333\nydim 0.00833333333\n"
with open(hdrfile, 'w') as o:
o.write(HDRFILE_STRING)
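# Example usage (a hedged sketch; the .hdr path below is hypothetical):
# replace_hdr_file('H:/GIS/SNODAS/SNWDS/SWEQ20040101.hdr')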
def get_snodas(out_dir, months='', years=''):
"""Downloads daily SNODAS data from ftp. This is slow.
:param out_dir: directory to store downloaded SNODAS zip files
:param months: months desired for download
:param years: years desired for download
:return: saved zip files in out_dir
.. note:
Use polaris: http://nsidc.org/data/polaris/
"""
import ftplib
if months == '':
months = [1, 12]
if years == '':
years = [2000, 2015]
monnames = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
mons = [str(i).zfill(2) + "_" + monnames[i - 1] for i in range(months[0], months[1] + 1)]
yrs = [str(i) for i in range(years[0], years[1] + 1)]
for yr in yrs:
for m in mons:
ftp_addr = "sidads.colorado.edu"
ftp = ftplib.FTP(ftp_addr)
ftp.login()
dir_path = "pub/DATASETS/NOAA/G02158/masked/" + yr + "/" + m + "/"
ftp.cwd(dir_path)
files = ftp.nlst()
for f in files:
if len(f) > 4:
save_file = open(out_dir + "/" + f, 'wb')
ftp.retrbinary("RETR " + f, save_file.write)
save_file.close()
print(f)
ftp.close()
def rename_polaris_snodas(path):
prodcode = {'us_ssmv11038wS__A': 'SPAT', 'us_ssmv11044bS__T': 'SNML', 'us_ssmv11050lL00T': 'SPSB',
'us_ssmv11034tS__T': 'SWEQ', 'us_ssmv01025SlL00': 'RAIN', 'us_ssmv01025SlL01': 'SNOW',
'us_ssmv11036tS__T': 'SNOD', 'us_ssmv11039lL00T': 'BSSB'}
for filename in os.listdir(path):
if filename.startswith("us_ssmv"):
code = prodcode[filename[0:17]]
yrsrt = filename.find('TNATS') + 5
yr = filename[yrsrt:yrsrt + 4]
mo = filename[yrsrt + 4:yrsrt + 6]
dy = filename[yrsrt + 6:yrsrt + 8]
try:
os.rename(os.path.join(path, filename), os.path.join(path, code + yr + mo + dy + filename[-4:]))
except:
pass
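# Example usage (a hedged sketch; the directories are hypothetical, and the FTP pull is slow,
# as noted in the docstring):
# get_snodas('H:/GIS/SNODAS/raw', months=[1, 12], years=[2004, 2014])
# rename_polaris_snodas('H:/GIS/SNODAS/SNWDS/')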
def snow_summary(code, scalingFactor, statistics="SUM", outcellsize='1000', monthRange='', yearRange='',
path="H:/GIS/SNODAS/SNWDS/", outpath="H:/GIS/SNODAS.gdb/", area=''):
"""
summarizes daily SNODAS data to monthly values
INPUT
-----
code = text; prefix of dataset to use; choices are 'RAIN','SWEQ','SNOD','SPAT','BSSB','SNML', or 'SPSB'
scalingFactor = float; table 1 at http://nsidc.org/data/docs/noaa/g02158_snodas_snow_cover_model/
statistics = text; from arcpy sa CellStatistics; choices are MEAN, MAJORITY, MAXIMUM, MEDIAN, MINIMUM, MINORITY,
RANGE, STD, SUM, or VARIETY
monthRange = len 2 list; begin and end month of data you wish to analyze
    yearRange = len 2 list; begin and end year of data you wish to analyze
path = directory where raw geoTiffs are located
outpath = directory where final data will be stored
OUTPUT
------
projected and scaled monthly rasters
"""
    if monthRange == '':
        monthRange = [1, 12]
    if yearRange == '':
        yearRange = [2000, 2015]
g = {}
arcpy.env.workspace = path
arcpy.env.overwriteOutput = True
if area == '':
area = 'H:/GIS/Calc.gdb/WBD_UT'
# arcpy.env.mask = area
statstype = {'MEAN': 'AVG', 'MAJORITY': 'MAJ', 'MAXIMUM': 'MAX', 'MEDIAN': 'MED', 'MINIMUM': 'MIN',
'MINORITY': 'MNR',
'RANGE': 'RNG', 'STD': 'STD', 'SUM': 'SUM', 'VARIETY': 'VAR'}
for y in range(yearRange[0], yearRange[1] + 1): # set years converted here
for m in range(monthRange[0], monthRange[1] + 1): # set months converted here
g[code + str(y) + str(m).zfill(2)] = [] # this defines the dictionary key based on data type month and year
for name in sorted(
glob.glob(path + code + '*.tif')): # pick all tiff files from raw data folder of a data type
rast = os.path.basename(name)
if rast[0:4] == code and int(rast[4:8]) == y and int(rast[8:10]) == m:
g[code + str(y) + str(m).zfill(2)].append(rast) # create a list of rasters for each month
else:
pass
if len(g[code + str(y) + str(m).zfill(2)]) > 0:
# print(g[code+str(y)+str(m).zfill(2)])
# ifnull = 'in_memory/ifnull'
# arcpy sa functions that summarize the daily data to monthly data
cellstats = CellStatistics(g[code + str(y) + str(m).zfill(2)], statistics_type=statistics,
ignore_nodata="DATA")
div = Divide(cellstats, scalingFactor) # scale factor, converts to kg/m2 10 then to m 0.001
calc = Con(div < 0.0, 0.0, div) # remove negative and null values
ifnull = Con(IsNull(calc), 0, calc) # remove null
# WKID 102039
outCS = arcpy.SpatialReference(102039) # change coordinate units to m for spatial analysis
# define save path for file
outnm = outpath + rast[0:4] + str(y).zfill(2) + str(m).zfill(2) + statstype[statistics]
memoryFeature = "in_memory/myMemoryFeature"
# memoryFeature = outnm
arcpy.ProjectRaster_management(ifnull, memoryFeature, outCS, 'BILINEAR', outcellsize,
'WGS_1984_(ITRF00)_To_NAD_1983', '#', '#')
# Execute ExtractByMask to clip snodas data to Utah watersheds
extrc = arcpy.sa.ExtractByMask(memoryFeature, area)
extrc.save(outnm)
print(outnm)
arcpy.Delete_management("in_memory")
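# Example usage (a hedged sketch; 'SWEQ' and the 1000.0 scaling factor are illustrative only;
# look up the correct factor for each product in table 1 of the SNODAS documentation cited in
# the docstring):
# snow_summary('SWEQ', 1000.0, statistics='MEAN', monthRange=[1, 12], yearRange=[2004, 2014])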
def totalavg(code, statistics="MEAN", monthRange=[1, 12], yearRange=[2003, 2016],
path="H:/GIS/SNODAS/SNODASproj.gdb/", outpath="H:/GIS/SNODAS/SNODASproj.gdb/"):
"""Summarizes daily raster data into monthly data.
INPUT
-----
    code = string with four letters representing data type to summarize (example 'BSSB')
statistics = how data will be summarized; defaults to monthly averages; options are
['MEAN','MAJORITY','MAXIMUM','MEDIAN','MINIMUM','MINORITY','RANGE','STD','SUM','VARIETY']
Most common are 'MEAN','MEDIAN', and 'SUM'
These are inputs that will be used in the ArcPy CellStatistics function.
See http://pro.arcgis.com/en/pro-app/tool-reference/spatial-analyst/cell-statistics.htm for documentation
monthRange = beginning and end months of summary statistics
yearRange = beginning and end years of summary statistics
path = location of geodatabase of data to summarize
outpath = location of geodatabase where output data should be stored
OUTPUT
------
summary raster(s) stored in outpath
"""
g = {}
statstype = {'MEAN': 'AVG', 'MAJORITY': 'MAJ', 'MAXIMUM': 'MAX', 'MEDIAN': 'MED', 'MINIMUM': 'MIN',
'MINORITY': 'MNR',
'RANGE': 'RNG', 'STD': 'STD', 'SUM': 'SUM', 'VARIETY': 'VAR'}
arcpy.env.workspace = path
arcpy.env.overwriteOutput = True
# iterate over month range set here; default is 1 to 12 (Jan to Dec)
for m in range(monthRange[0], monthRange[1] + 1):
# this defines the dictionary key based on data type, month, and year
g[code + '0000' + str(m).zfill(2)] = []
# pick all tiff files from raw data folder of a data type
for rast in arcpy.ListRasters():
yrrng = range(yearRange[0], yearRange[1] + 1) # set years converted here
# create a list of rasters with the right code and month and year
if rast[0:4] == code and int(rast[4:8]) in yrrng and int(rast[8:10]) == m:
g[code + '0000' + str(m).zfill(2)].append(rast) # create a list of rasters for each month
else:
pass
if len(g[code + '0000' + str(m).zfill(2)]) > 0:
# arcpy sa functions that summarize the daily data to monthly data
calc = CellStatistics(g[code + '0000' + str(m).zfill(2)], statistics_type=statistics, ignore_nodata="DATA")
calc.save(code + '0000' + str(m).zfill(2) + statstype[statistics])
print(code + '0000' + str(m).zfill(2) + statstype[statistics])
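# Example usage (a hedged sketch; relies on the default geodatabase paths in the signature):
# totalavg('SWEQ', statistics='MEAN', monthRange=[1, 12], yearRange=[2004, 2014])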
if __name__ == '__main__':
    # No main() is defined in this module; import it and call the functions above directly.
    pass
|
normal
|
{
"blob_id": "afb09f9d5860994f38e8553b19e7ebc339cc2df6",
"index": 8785,
"step-1": "<mask token>\n\n\ndef get_file_list(save_path, wld='*.105*.hdf'):\n \"\"\"\n\n Args:\n save_path: path to folder where raw MODIS files are\n wld: common wildcard in all of the raw MODIS files\n\n Returns:\n list of files to analyze in the raw folder\n\n \"\"\"\n return glob.glob(os.path.join(save_path, wld))\n\n\n<mask token>\n\n\ndef untar(filepath, outfoldername='.', compression='r', deletesource=False):\n \"\"\"\n Given an input tar archive filepath, extracts the files.\n Required: filepath -- the path to the tar archive\n Optional: outfoldername -- the output directory for the files; DEFAULT is directory with tar archive\n compression -- the type of compression used in the archive; DEFAULT is 'r'; use \"r:gz\" for gzipped archives\n deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is false\n Output: filelist -- the list of all extract files\n \"\"\"\n import tarfile\n with tarfile.open(filepath, compression) as tfile:\n filelist = tfile.getnames()\n tfile.extractall(path=outfoldername)\n if deletesource:\n try:\n os.remove(filepath)\n except:\n raise Exception('Could not delete tar archive {0}.'.format(\n filepath))\n return filelist\n\n\ndef ungz(filepath, compression='rb', deletesource=False):\n \"\"\"\n Given an input gz archive filepath, extracts the files.\n Required: filepath -- the path to the tar archive\n Optional: outfoldername -- the output directory for the files; DEFAULT is directory with tar archive\n compression -- the type of compression used in the archive; DEFAULT is 'r'; use \"r:gz\" for gzipped archives\n deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is false\n Output: filelist -- the list of all extract files\n \"\"\"\n import gzip\n with gzip.open(filepath, compression) as f:\n outF = open(filepath[:-3], 'wb')\n outF.write(f.read())\n f.close()\n outF.close()\n if deletesource:\n try:\n os.remove(filepath)\n except:\n raise Exception('Could not delete gz archive {0}.'.format(filepath)\n )\n return filepath[:-3]\n\n\n<mask token>\n\n\ndef get_snodas(out_dir, months='', years=''):\n \"\"\"Downloads daily SNODAS data from ftp. This is slow.\n\n :param out_dir: directory to store downloaded SNODAS zip files\n :param months: months desired for download\n :param years: years desired for download\n :return: saved zip files in out_dir\n\n .. 
note:\n Use polaris: http://nsidc.org/data/polaris/\n \"\"\"\n import ftplib\n if months == '':\n months = [1, 12]\n if years == '':\n years = [2000, 2015]\n monnames = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug',\n 'Sep', 'Oct', 'Nov', 'Dec']\n mons = [(str(i).zfill(2) + '_' + monnames[i - 1]) for i in range(months\n [0], months[1] + 1)]\n yrs = [str(i) for i in range(years[0], years[1] + 1)]\n for yr in yrs:\n for m in mons:\n ftp_addr = 'sidads.colorado.edu'\n ftp = ftplib.FTP(ftp_addr)\n ftp.login()\n dir_path = 'pub/DATASETS/NOAA/G02158/masked/' + yr + '/' + m + '/'\n ftp.cwd(dir_path)\n files = ftp.nlst()\n for f in files:\n if len(f) > 4:\n save_file = open(out_dir + '/' + f, 'wb')\n ftp.retrbinary('RETR ' + f, save_file.write)\n save_file.close()\n print(f)\n ftp.close()\n\n\ndef rename_polaris_snodas(path):\n prodcode = {'us_ssmv11038wS__A': 'SPAT', 'us_ssmv11044bS__T': 'SNML',\n 'us_ssmv11050lL00T': 'SPSB', 'us_ssmv11034tS__T': 'SWEQ',\n 'us_ssmv01025SlL00': 'RAIN', 'us_ssmv01025SlL01': 'SNOW',\n 'us_ssmv11036tS__T': 'SNOD', 'us_ssmv11039lL00T': 'BSSB'}\n for filename in os.listdir(path):\n if filename.startswith('us_ssmv'):\n code = prodcode[filename[0:17]]\n yrsrt = filename.find('TNATS') + 5\n yr = filename[yrsrt:yrsrt + 4]\n mo = filename[yrsrt + 4:yrsrt + 6]\n dy = filename[yrsrt + 6:yrsrt + 8]\n try:\n os.rename(os.path.join(path, filename), os.path.join(path, \n code + yr + mo + dy + filename[-4:]))\n except:\n pass\n\n\n<mask token>\n\n\ndef totalavg(code, statistics='MEAN', monthRange=[1, 12], yearRange=[2003, \n 2016], path='H:/GIS/SNODAS/SNODASproj.gdb/', outpath=\n 'H:/GIS/SNODAS/SNODASproj.gdb/'):\n \"\"\"Summarizes daily raster data into monthly data.\n\n INPUT\n -----\n code = string with four letters represting data type to summarize (example 'BSSB')\n statistics = how data will be summarized; defaults to monthly averages; options are\n ['MEAN','MAJORITY','MAXIMUM','MEDIAN','MINIMUM','MINORITY','RANGE','STD','SUM','VARIETY']\n Most common are 'MEAN','MEDIAN', and 'SUM'\n These are inputs that will be used in the ArcPy CellStatistics function.\n See http://pro.arcgis.com/en/pro-app/tool-reference/spatial-analyst/cell-statistics.htm for documentation\n monthRange = beginning and end months of summary statistics\n yearRange = beginning and end years of summary statistics\n path = location of geodatabase of data to summarize\n outpath = location of geodatabase where output data should be stored\n OUTPUT\n ------\n summary raster(s) stored in outpath\n\n \"\"\"\n g = {}\n statstype = {'MEAN': 'AVG', 'MAJORITY': 'MAJ', 'MAXIMUM': 'MAX',\n 'MEDIAN': 'MED', 'MINIMUM': 'MIN', 'MINORITY': 'MNR', 'RANGE':\n 'RNG', 'STD': 'STD', 'SUM': 'SUM', 'VARIETY': 'VAR'}\n arcpy.env.workspace = path\n arcpy.env.overwriteOutput = True\n for m in range(monthRange[0], monthRange[1] + 1):\n g[code + '0000' + str(m).zfill(2)] = []\n for rast in arcpy.ListRasters():\n yrrng = range(yearRange[0], yearRange[1] + 1)\n if rast[0:4] == code and int(rast[4:8]) in yrrng and int(rast[8:10]\n ) == m:\n g[code + '0000' + str(m).zfill(2)].append(rast)\n else:\n pass\n if len(g[code + '0000' + str(m).zfill(2)]) > 0:\n calc = CellStatistics(g[code + '0000' + str(m).zfill(2)],\n statistics_type=statistics, ignore_nodata='DATA')\n calc.save(code + '0000' + str(m).zfill(2) + statstype[statistics])\n print(code + '0000' + str(m).zfill(2) + statstype[statistics])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_file_list(save_path, wld='*.105*.hdf'):\n \"\"\"\n\n Args:\n save_path: path to folder where raw MODIS files are\n wld: common wildcard in all of the raw MODIS files\n\n Returns:\n list of files to analyze in the raw folder\n\n \"\"\"\n return glob.glob(os.path.join(save_path, wld))\n\n\ndef reproject_modis(files, save_path, data_type, eight_day=True, proj=102003):\n \"\"\"Iterates through MODIS files in a folder reprojecting them.\n\n Takes the crazy MODIS sinusoidal projection to a user defined projection.\n\n Args:\n files: list of file paths of MODIS hdf files; created using files = glob.glob(os.path.join(save_path, '*.105*.hdf'))\n save_path: folder to store the reprojected files\n data_type: type of MODIS16 data being reprojected; options are 'ET','PET','LE', and 'PLE'\n eight_day: time span of modis file; Bool where default is true (input 8-day rasters)\n proj: projection of output data by epsg number; default is nad83 zone 12\n Returns:\n Reprojected MODIS files\n\n ..notes:\n The EPSG code for NAD83 Zone 12 is 26912.\n The EPSG code for Albers Equal Area is 102003\n http://files.ntsg.umt.edu/data/NTSG_Products/MOD16/MOD16_global_evapotranspiration_description.pdf\n https://modis-land.gsfc.nasa.gov/MODLAND_grid.html\n https://lpdaac.usgs.gov/dataset_discovery/modis/modis_products_table/mod16a2_v006<\n https://search.earthdata.nasa.gov/search/granules?p=C1000000524-LPDAAC_ECS&m=36.87890625!-114.50390625!5!1!0!0%2C2&tl=1503517150!4!!&q=MOD16A2+V006&sb=-114.29296875%2C36.80859375%2C-109.96875%2C42.2578125\n \"\"\"\n import pymodis\n datadir = {'ET': '/ET/', 'PET': '/PET/', 'LE': '/LE/', 'PLE': '/PLE/'}\n matrdir = {'ET': [1, 0, 0, 0], 'LE': [0, 1, 0, 0], 'PET': [0, 0, 1, 0],\n 'PLE': [0, 0, 0, 1]}\n if not os.path.exists(save_path + datadir[data_type]):\n os.makedirs(save_path + datadir[data_type])\n print('created {:}'.format(save_path + datadir[data_type]))\n for f in files:\n year = f.split('\\\\')[1].split('.')[1][1:5]\n v = f.split('\\\\')[1].split('.')[2][-2:]\n h = f.split('\\\\')[1].split('.')[2][1:3]\n if eight_day:\n doy = f.split('\\\\')[1].split('.')[1][-3:]\n fname = 'A' + year + 'D' + doy + 'h' + h + 'v' + v\n pref = os.path.join(save_path + datadir[data_type] + fname)\n else:\n month = f.split('\\\\')[1].split('.')[1][-2:]\n fname = 'A' + year + 'M' + month + 'h' + h + 'v' + v\n pref = os.path.join(save_path + datadir[data_type] + fname)\n convertsingle = pymodis.convertmodis_gdal.convertModisGDAL(hdfname=\n f, prefix=pref, subset=matrdir[data_type], res=1000, epsg=proj)\n try:\n convertsingle.run()\n except:\n print(fname + ' failed!')\n pass\n\n\ndef clip_and_fix(path, outpath, data_type, area=''):\n \"\"\"Clips raster to Utah's Watersheds and makes exception values null.\n\n Args:\n path: folder of the reprojected MODIS files\n outpath: ESRI gdb to store the clipped files\n data_type: type of MODIS16 data being reprojected; options are 'ET','PET','LE', and 'PLE'\n area: path to polygon used to clip tiles\n\n \"\"\"\n arcpy.CheckOutExtension('Spatial')\n arcpy.env.workspace = path\n arcpy.env.overwriteOutput = True\n if area == '':\n area = 'H:/GIS/Calc.gdb/WBD_UT'\n arcpy.env.mask = area\n arcpy.CheckOutExtension('spatial')\n for rast in arcpy.ListRasters():\n calc = SetNull(arcpy.Raster(rast) > 32700, arcpy.Raster(rast))\n calc.save(outpath + data_type + rast[1:5] + rast[6:8] + 'h' + rast[\n 10:11] + 'v' + rast[13:14])\n print(outpath + data_type + rast[1:5] + rast[6:8] + 'h' + rast[10:\n 11] + 'v' + rast[13:14])\n\n\ndef 
merge_rasts(path, data_type='ET', monthRange='', yearRange='', outpath=''):\n \"\"\"Mosaics (merges) different MODIS cells into one layer.\n\n\n \"\"\"\n if monthRange == '':\n monthRange = [1, 12]\n if yearRange == '':\n yearRange = [2000, 2015]\n if outpath == '':\n outpath = path\n arcpy.env.workspace = path\n outCS = arcpy.SpatialReference('NAD 1983 UTM Zone 12N')\n for y in range(yearRange[0], yearRange[-1] + 1):\n for m in range(monthRange[0], monthRange[-1] + 1):\n nm = data_type + str(y) + str(m).zfill(2)\n rlist = []\n for rast in arcpy.ListRasters(nm + '*'):\n rlist.append(rast)\n try:\n arcpy.MosaicToNewRaster_management(rlist, outpath, nm + 'c',\n outCS, '16_BIT_UNSIGNED', '1000', '1', 'LAST', 'LAST')\n print(path + nm + 'c')\n except:\n print(nm + ' failed!')\n pass\n\n\ndef scale_modis(path, out_path, scaleby=10000.0, data_type='ET', monthRange\n =[1, 12], yearRange=[2000, 2014]):\n \"\"\"\n\n :param path: directory to unconverted modis tiles\n :param out_path: directory to put output in\n :param scaleby: scaling factor for MODIS data; default converts to meters/month\n :param data_type: type of MODIS16 data being scaled; used for file name; options are 'ET','PET','LE', and 'PLE'\n :param monthRange: range of months to process data\n :param yearRange: range of years to process data\n :return:\n \"\"\"\n arcpy.CheckOutExtension('spatial')\n for y in range(yearRange[0], yearRange[-1] + 1):\n for m in range(monthRange[0], monthRange[-1] + 1):\n nm = data_type + str(y) + str(m).zfill(2)\n calc = Divide(nm + 'c', scaleby)\n calc.save(out_path + nm)\n\n\ndef untar(filepath, outfoldername='.', compression='r', deletesource=False):\n \"\"\"\n Given an input tar archive filepath, extracts the files.\n Required: filepath -- the path to the tar archive\n Optional: outfoldername -- the output directory for the files; DEFAULT is directory with tar archive\n compression -- the type of compression used in the archive; DEFAULT is 'r'; use \"r:gz\" for gzipped archives\n deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is false\n Output: filelist -- the list of all extract files\n \"\"\"\n import tarfile\n with tarfile.open(filepath, compression) as tfile:\n filelist = tfile.getnames()\n tfile.extractall(path=outfoldername)\n if deletesource:\n try:\n os.remove(filepath)\n except:\n raise Exception('Could not delete tar archive {0}.'.format(\n filepath))\n return filelist\n\n\ndef ungz(filepath, compression='rb', deletesource=False):\n \"\"\"\n Given an input gz archive filepath, extracts the files.\n Required: filepath -- the path to the tar archive\n Optional: outfoldername -- the output directory for the files; DEFAULT is directory with tar archive\n compression -- the type of compression used in the archive; DEFAULT is 'r'; use \"r:gz\" for gzipped archives\n deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is false\n Output: filelist -- the list of all extract files\n \"\"\"\n import gzip\n with gzip.open(filepath, compression) as f:\n outF = open(filepath[:-3], 'wb')\n outF.write(f.read())\n f.close()\n outF.close()\n if deletesource:\n try:\n os.remove(filepath)\n except:\n raise Exception('Could not delete gz archive {0}.'.format(filepath)\n )\n return filepath[:-3]\n\n\ndef replace_hdr_file(hdrfile):\n \"\"\"\n Replace the .hdr file for a .bil raster with the correct data for Arc processing\n Required: hdrfile -- filepath for .hdr file to replace/create\n Output: None\n 
\"\"\"\n HDRFILE_STRING = \"\"\"byteorder M\nlayout bil\nnbands 1\nnbits 16\nncols 6935\nnrows 3351\n ulxmap -124.729583333331703\nulymap 52.871249516804028\nxdim 0.00833333333\nydim 0.00833333333\n\"\"\"\n with open(hdrfile, 'w') as o:\n o.write(HDRFILE_STRING)\n\n\ndef get_snodas(out_dir, months='', years=''):\n \"\"\"Downloads daily SNODAS data from ftp. This is slow.\n\n :param out_dir: directory to store downloaded SNODAS zip files\n :param months: months desired for download\n :param years: years desired for download\n :return: saved zip files in out_dir\n\n .. note:\n Use polaris: http://nsidc.org/data/polaris/\n \"\"\"\n import ftplib\n if months == '':\n months = [1, 12]\n if years == '':\n years = [2000, 2015]\n monnames = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug',\n 'Sep', 'Oct', 'Nov', 'Dec']\n mons = [(str(i).zfill(2) + '_' + monnames[i - 1]) for i in range(months\n [0], months[1] + 1)]\n yrs = [str(i) for i in range(years[0], years[1] + 1)]\n for yr in yrs:\n for m in mons:\n ftp_addr = 'sidads.colorado.edu'\n ftp = ftplib.FTP(ftp_addr)\n ftp.login()\n dir_path = 'pub/DATASETS/NOAA/G02158/masked/' + yr + '/' + m + '/'\n ftp.cwd(dir_path)\n files = ftp.nlst()\n for f in files:\n if len(f) > 4:\n save_file = open(out_dir + '/' + f, 'wb')\n ftp.retrbinary('RETR ' + f, save_file.write)\n save_file.close()\n print(f)\n ftp.close()\n\n\ndef rename_polaris_snodas(path):\n prodcode = {'us_ssmv11038wS__A': 'SPAT', 'us_ssmv11044bS__T': 'SNML',\n 'us_ssmv11050lL00T': 'SPSB', 'us_ssmv11034tS__T': 'SWEQ',\n 'us_ssmv01025SlL00': 'RAIN', 'us_ssmv01025SlL01': 'SNOW',\n 'us_ssmv11036tS__T': 'SNOD', 'us_ssmv11039lL00T': 'BSSB'}\n for filename in os.listdir(path):\n if filename.startswith('us_ssmv'):\n code = prodcode[filename[0:17]]\n yrsrt = filename.find('TNATS') + 5\n yr = filename[yrsrt:yrsrt + 4]\n mo = filename[yrsrt + 4:yrsrt + 6]\n dy = filename[yrsrt + 6:yrsrt + 8]\n try:\n os.rename(os.path.join(path, filename), os.path.join(path, \n code + yr + mo + dy + filename[-4:]))\n except:\n pass\n\n\n<mask token>\n\n\ndef totalavg(code, statistics='MEAN', monthRange=[1, 12], yearRange=[2003, \n 2016], path='H:/GIS/SNODAS/SNODASproj.gdb/', outpath=\n 'H:/GIS/SNODAS/SNODASproj.gdb/'):\n \"\"\"Summarizes daily raster data into monthly data.\n\n INPUT\n -----\n code = string with four letters represting data type to summarize (example 'BSSB')\n statistics = how data will be summarized; defaults to monthly averages; options are\n ['MEAN','MAJORITY','MAXIMUM','MEDIAN','MINIMUM','MINORITY','RANGE','STD','SUM','VARIETY']\n Most common are 'MEAN','MEDIAN', and 'SUM'\n These are inputs that will be used in the ArcPy CellStatistics function.\n See http://pro.arcgis.com/en/pro-app/tool-reference/spatial-analyst/cell-statistics.htm for documentation\n monthRange = beginning and end months of summary statistics\n yearRange = beginning and end years of summary statistics\n path = location of geodatabase of data to summarize\n outpath = location of geodatabase where output data should be stored\n OUTPUT\n ------\n summary raster(s) stored in outpath\n\n \"\"\"\n g = {}\n statstype = {'MEAN': 'AVG', 'MAJORITY': 'MAJ', 'MAXIMUM': 'MAX',\n 'MEDIAN': 'MED', 'MINIMUM': 'MIN', 'MINORITY': 'MNR', 'RANGE':\n 'RNG', 'STD': 'STD', 'SUM': 'SUM', 'VARIETY': 'VAR'}\n arcpy.env.workspace = path\n arcpy.env.overwriteOutput = True\n for m in range(monthRange[0], monthRange[1] + 1):\n g[code + '0000' + str(m).zfill(2)] = []\n for rast in arcpy.ListRasters():\n yrrng = range(yearRange[0], yearRange[1] + 1)\n if 
rast[0:4] == code and int(rast[4:8]) in yrrng and int(rast[8:10]\n ) == m:\n g[code + '0000' + str(m).zfill(2)].append(rast)\n else:\n pass\n if len(g[code + '0000' + str(m).zfill(2)]) > 0:\n calc = CellStatistics(g[code + '0000' + str(m).zfill(2)],\n statistics_type=statistics, ignore_nodata='DATA')\n calc.save(code + '0000' + str(m).zfill(2) + statstype[statistics])\n print(code + '0000' + str(m).zfill(2) + statstype[statistics])\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_file_list(save_path, wld='*.105*.hdf'):\n \"\"\"\n\n Args:\n save_path: path to folder where raw MODIS files are\n wld: common wildcard in all of the raw MODIS files\n\n Returns:\n list of files to analyze in the raw folder\n\n \"\"\"\n return glob.glob(os.path.join(save_path, wld))\n\n\ndef reproject_modis(files, save_path, data_type, eight_day=True, proj=102003):\n \"\"\"Iterates through MODIS files in a folder reprojecting them.\n\n Takes the crazy MODIS sinusoidal projection to a user defined projection.\n\n Args:\n files: list of file paths of MODIS hdf files; created using files = glob.glob(os.path.join(save_path, '*.105*.hdf'))\n save_path: folder to store the reprojected files\n data_type: type of MODIS16 data being reprojected; options are 'ET','PET','LE', and 'PLE'\n eight_day: time span of modis file; Bool where default is true (input 8-day rasters)\n proj: projection of output data by epsg number; default is nad83 zone 12\n Returns:\n Reprojected MODIS files\n\n ..notes:\n The EPSG code for NAD83 Zone 12 is 26912.\n The EPSG code for Albers Equal Area is 102003\n http://files.ntsg.umt.edu/data/NTSG_Products/MOD16/MOD16_global_evapotranspiration_description.pdf\n https://modis-land.gsfc.nasa.gov/MODLAND_grid.html\n https://lpdaac.usgs.gov/dataset_discovery/modis/modis_products_table/mod16a2_v006<\n https://search.earthdata.nasa.gov/search/granules?p=C1000000524-LPDAAC_ECS&m=36.87890625!-114.50390625!5!1!0!0%2C2&tl=1503517150!4!!&q=MOD16A2+V006&sb=-114.29296875%2C36.80859375%2C-109.96875%2C42.2578125\n \"\"\"\n import pymodis\n datadir = {'ET': '/ET/', 'PET': '/PET/', 'LE': '/LE/', 'PLE': '/PLE/'}\n matrdir = {'ET': [1, 0, 0, 0], 'LE': [0, 1, 0, 0], 'PET': [0, 0, 1, 0],\n 'PLE': [0, 0, 0, 1]}\n if not os.path.exists(save_path + datadir[data_type]):\n os.makedirs(save_path + datadir[data_type])\n print('created {:}'.format(save_path + datadir[data_type]))\n for f in files:\n year = f.split('\\\\')[1].split('.')[1][1:5]\n v = f.split('\\\\')[1].split('.')[2][-2:]\n h = f.split('\\\\')[1].split('.')[2][1:3]\n if eight_day:\n doy = f.split('\\\\')[1].split('.')[1][-3:]\n fname = 'A' + year + 'D' + doy + 'h' + h + 'v' + v\n pref = os.path.join(save_path + datadir[data_type] + fname)\n else:\n month = f.split('\\\\')[1].split('.')[1][-2:]\n fname = 'A' + year + 'M' + month + 'h' + h + 'v' + v\n pref = os.path.join(save_path + datadir[data_type] + fname)\n convertsingle = pymodis.convertmodis_gdal.convertModisGDAL(hdfname=\n f, prefix=pref, subset=matrdir[data_type], res=1000, epsg=proj)\n try:\n convertsingle.run()\n except:\n print(fname + ' failed!')\n pass\n\n\ndef clip_and_fix(path, outpath, data_type, area=''):\n \"\"\"Clips raster to Utah's Watersheds and makes exception values null.\n\n Args:\n path: folder of the reprojected MODIS files\n outpath: ESRI gdb to store the clipped files\n data_type: type of MODIS16 data being reprojected; options are 'ET','PET','LE', and 'PLE'\n area: path to polygon used to clip tiles\n\n \"\"\"\n arcpy.CheckOutExtension('Spatial')\n arcpy.env.workspace = path\n arcpy.env.overwriteOutput = True\n if area == '':\n area = 'H:/GIS/Calc.gdb/WBD_UT'\n arcpy.env.mask = area\n arcpy.CheckOutExtension('spatial')\n for rast in arcpy.ListRasters():\n calc = SetNull(arcpy.Raster(rast) > 32700, arcpy.Raster(rast))\n calc.save(outpath + data_type + rast[1:5] + rast[6:8] + 'h' + rast[\n 10:11] + 'v' + rast[13:14])\n print(outpath + data_type + rast[1:5] + rast[6:8] + 'h' + rast[10:\n 11] + 'v' + rast[13:14])\n\n\ndef 
merge_rasts(path, data_type='ET', monthRange='', yearRange='', outpath=''):\n \"\"\"Mosaics (merges) different MODIS cells into one layer.\n\n\n \"\"\"\n if monthRange == '':\n monthRange = [1, 12]\n if yearRange == '':\n yearRange = [2000, 2015]\n if outpath == '':\n outpath = path\n arcpy.env.workspace = path\n outCS = arcpy.SpatialReference('NAD 1983 UTM Zone 12N')\n for y in range(yearRange[0], yearRange[-1] + 1):\n for m in range(monthRange[0], monthRange[-1] + 1):\n nm = data_type + str(y) + str(m).zfill(2)\n rlist = []\n for rast in arcpy.ListRasters(nm + '*'):\n rlist.append(rast)\n try:\n arcpy.MosaicToNewRaster_management(rlist, outpath, nm + 'c',\n outCS, '16_BIT_UNSIGNED', '1000', '1', 'LAST', 'LAST')\n print(path + nm + 'c')\n except:\n print(nm + ' failed!')\n pass\n\n\ndef scale_modis(path, out_path, scaleby=10000.0, data_type='ET', monthRange\n =[1, 12], yearRange=[2000, 2014]):\n \"\"\"\n\n :param path: directory to unconverted modis tiles\n :param out_path: directory to put output in\n :param scaleby: scaling factor for MODIS data; default converts to meters/month\n :param data_type: type of MODIS16 data being scaled; used for file name; options are 'ET','PET','LE', and 'PLE'\n :param monthRange: range of months to process data\n :param yearRange: range of years to process data\n :return:\n \"\"\"\n arcpy.CheckOutExtension('spatial')\n for y in range(yearRange[0], yearRange[-1] + 1):\n for m in range(monthRange[0], monthRange[-1] + 1):\n nm = data_type + str(y) + str(m).zfill(2)\n calc = Divide(nm + 'c', scaleby)\n calc.save(out_path + nm)\n\n\ndef untar(filepath, outfoldername='.', compression='r', deletesource=False):\n \"\"\"\n Given an input tar archive filepath, extracts the files.\n Required: filepath -- the path to the tar archive\n Optional: outfoldername -- the output directory for the files; DEFAULT is directory with tar archive\n compression -- the type of compression used in the archive; DEFAULT is 'r'; use \"r:gz\" for gzipped archives\n deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is false\n Output: filelist -- the list of all extract files\n \"\"\"\n import tarfile\n with tarfile.open(filepath, compression) as tfile:\n filelist = tfile.getnames()\n tfile.extractall(path=outfoldername)\n if deletesource:\n try:\n os.remove(filepath)\n except:\n raise Exception('Could not delete tar archive {0}.'.format(\n filepath))\n return filelist\n\n\ndef ungz(filepath, compression='rb', deletesource=False):\n \"\"\"\n Given an input gz archive filepath, extracts the files.\n Required: filepath -- the path to the tar archive\n Optional: outfoldername -- the output directory for the files; DEFAULT is directory with tar archive\n compression -- the type of compression used in the archive; DEFAULT is 'r'; use \"r:gz\" for gzipped archives\n deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is false\n Output: filelist -- the list of all extract files\n \"\"\"\n import gzip\n with gzip.open(filepath, compression) as f:\n outF = open(filepath[:-3], 'wb')\n outF.write(f.read())\n f.close()\n outF.close()\n if deletesource:\n try:\n os.remove(filepath)\n except:\n raise Exception('Could not delete gz archive {0}.'.format(filepath)\n )\n return filepath[:-3]\n\n\ndef replace_hdr_file(hdrfile):\n \"\"\"\n Replace the .hdr file for a .bil raster with the correct data for Arc processing\n Required: hdrfile -- filepath for .hdr file to replace/create\n Output: None\n 
\"\"\"\n HDRFILE_STRING = \"\"\"byteorder M\nlayout bil\nnbands 1\nnbits 16\nncols 6935\nnrows 3351\n ulxmap -124.729583333331703\nulymap 52.871249516804028\nxdim 0.00833333333\nydim 0.00833333333\n\"\"\"\n with open(hdrfile, 'w') as o:\n o.write(HDRFILE_STRING)\n\n\ndef get_snodas(out_dir, months='', years=''):\n \"\"\"Downloads daily SNODAS data from ftp. This is slow.\n\n :param out_dir: directory to store downloaded SNODAS zip files\n :param months: months desired for download\n :param years: years desired for download\n :return: saved zip files in out_dir\n\n .. note:\n Use polaris: http://nsidc.org/data/polaris/\n \"\"\"\n import ftplib\n if months == '':\n months = [1, 12]\n if years == '':\n years = [2000, 2015]\n monnames = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug',\n 'Sep', 'Oct', 'Nov', 'Dec']\n mons = [(str(i).zfill(2) + '_' + monnames[i - 1]) for i in range(months\n [0], months[1] + 1)]\n yrs = [str(i) for i in range(years[0], years[1] + 1)]\n for yr in yrs:\n for m in mons:\n ftp_addr = 'sidads.colorado.edu'\n ftp = ftplib.FTP(ftp_addr)\n ftp.login()\n dir_path = 'pub/DATASETS/NOAA/G02158/masked/' + yr + '/' + m + '/'\n ftp.cwd(dir_path)\n files = ftp.nlst()\n for f in files:\n if len(f) > 4:\n save_file = open(out_dir + '/' + f, 'wb')\n ftp.retrbinary('RETR ' + f, save_file.write)\n save_file.close()\n print(f)\n ftp.close()\n\n\ndef rename_polaris_snodas(path):\n prodcode = {'us_ssmv11038wS__A': 'SPAT', 'us_ssmv11044bS__T': 'SNML',\n 'us_ssmv11050lL00T': 'SPSB', 'us_ssmv11034tS__T': 'SWEQ',\n 'us_ssmv01025SlL00': 'RAIN', 'us_ssmv01025SlL01': 'SNOW',\n 'us_ssmv11036tS__T': 'SNOD', 'us_ssmv11039lL00T': 'BSSB'}\n for filename in os.listdir(path):\n if filename.startswith('us_ssmv'):\n code = prodcode[filename[0:17]]\n yrsrt = filename.find('TNATS') + 5\n yr = filename[yrsrt:yrsrt + 4]\n mo = filename[yrsrt + 4:yrsrt + 6]\n dy = filename[yrsrt + 6:yrsrt + 8]\n try:\n os.rename(os.path.join(path, filename), os.path.join(path, \n code + yr + mo + dy + filename[-4:]))\n except:\n pass\n\n\ndef snow_summary(code, scalingFactor, statistics='SUM', outcellsize='1000',\n monthRange='', yearRange='', path='H:/GIS/SNODAS/SNWDS/', outpath=\n 'H:/GIS/SNODAS.gdb/', area=''):\n \"\"\"\n summarizes daily SNODAS data to monthly values\n\n INPUT\n -----\n code = text; prefix of dataset to use; choices are 'RAIN','SWEQ','SNOD','SPAT','BSSB','SNML', or 'SPSB'\n scalingFactor = float; table 1 at http://nsidc.org/data/docs/noaa/g02158_snodas_snow_cover_model/\n statistics = text; from arcpy sa CellStatistics; choices are MEAN, MAJORITY, MAXIMUM, MEDIAN, MINIMUM, MINORITY,\n RANGE, STD, SUM, or VARIETY\n monthRange = len 2 list; begin and end month of data you wish to analyze\n yearRange = len 2 list; bengin and end year of data you wish to analyze\n path = directory where raw geoTiffs are located\n outpath = directory where final data will be stored\n\n OUTPUT\n ------\n projected and scaled monthly rasters\n\n \"\"\"\n if monthRange == '':\n months = [1, 12]\n if yearRange == '':\n years = [2000, 2015]\n g = {}\n arcpy.env.workspace = path\n arcpy.env.overwriteOutput = True\n if area == '':\n area = 'H:/GIS/Calc.gdb/WBD_UT'\n statstype = {'MEAN': 'AVG', 'MAJORITY': 'MAJ', 'MAXIMUM': 'MAX',\n 'MEDIAN': 'MED', 'MINIMUM': 'MIN', 'MINORITY': 'MNR', 'RANGE':\n 'RNG', 'STD': 'STD', 'SUM': 'SUM', 'VARIETY': 'VAR'}\n for y in range(yearRange[0], yearRange[1] + 1):\n for m in range(monthRange[0], monthRange[1] + 1):\n g[code + str(y) + str(m).zfill(2)] = []\n for name in sorted(glob.glob(path 
+ code + '*.tif')):\n rast = os.path.basename(name)\n if rast[0:4] == code and int(rast[4:8]) == y and int(rast[8:10]\n ) == m:\n g[code + str(y) + str(m).zfill(2)].append(rast)\n else:\n pass\n if len(g[code + str(y) + str(m).zfill(2)]) > 0:\n cellstats = CellStatistics(g[code + str(y) + str(m).zfill(2\n )], statistics_type=statistics, ignore_nodata='DATA')\n div = Divide(cellstats, scalingFactor)\n calc = Con(div < 0.0, 0.0, div)\n ifnull = Con(IsNull(calc), 0, calc)\n outCS = arcpy.SpatialReference(102039)\n outnm = outpath + rast[0:4] + str(y).zfill(2) + str(m).zfill(2\n ) + statstype[statistics]\n memoryFeature = 'in_memory/myMemoryFeature'\n arcpy.ProjectRaster_management(ifnull, memoryFeature, outCS,\n 'BILINEAR', outcellsize,\n 'WGS_1984_(ITRF00)_To_NAD_1983', '#', '#')\n extrc = arcpy.sa.ExtractByMask(memoryFeature, area)\n extrc.save(outnm)\n print(outnm)\n arcpy.Delete_management('in_memory')\n\n\ndef totalavg(code, statistics='MEAN', monthRange=[1, 12], yearRange=[2003, \n 2016], path='H:/GIS/SNODAS/SNODASproj.gdb/', outpath=\n 'H:/GIS/SNODAS/SNODASproj.gdb/'):\n \"\"\"Summarizes daily raster data into monthly data.\n\n INPUT\n -----\n code = string with four letters represting data type to summarize (example 'BSSB')\n statistics = how data will be summarized; defaults to monthly averages; options are\n ['MEAN','MAJORITY','MAXIMUM','MEDIAN','MINIMUM','MINORITY','RANGE','STD','SUM','VARIETY']\n Most common are 'MEAN','MEDIAN', and 'SUM'\n These are inputs that will be used in the ArcPy CellStatistics function.\n See http://pro.arcgis.com/en/pro-app/tool-reference/spatial-analyst/cell-statistics.htm for documentation\n monthRange = beginning and end months of summary statistics\n yearRange = beginning and end years of summary statistics\n path = location of geodatabase of data to summarize\n outpath = location of geodatabase where output data should be stored\n OUTPUT\n ------\n summary raster(s) stored in outpath\n\n \"\"\"\n g = {}\n statstype = {'MEAN': 'AVG', 'MAJORITY': 'MAJ', 'MAXIMUM': 'MAX',\n 'MEDIAN': 'MED', 'MINIMUM': 'MIN', 'MINORITY': 'MNR', 'RANGE':\n 'RNG', 'STD': 'STD', 'SUM': 'SUM', 'VARIETY': 'VAR'}\n arcpy.env.workspace = path\n arcpy.env.overwriteOutput = True\n for m in range(monthRange[0], monthRange[1] + 1):\n g[code + '0000' + str(m).zfill(2)] = []\n for rast in arcpy.ListRasters():\n yrrng = range(yearRange[0], yearRange[1] + 1)\n if rast[0:4] == code and int(rast[4:8]) in yrrng and int(rast[8:10]\n ) == m:\n g[code + '0000' + str(m).zfill(2)].append(rast)\n else:\n pass\n if len(g[code + '0000' + str(m).zfill(2)]) > 0:\n calc = CellStatistics(g[code + '0000' + str(m).zfill(2)],\n statistics_type=statistics, ignore_nodata='DATA')\n calc.save(code + '0000' + str(m).zfill(2) + statstype[statistics])\n print(code + '0000' + str(m).zfill(2) + statstype[statistics])\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef get_modis(tiles, save_path, months='', years=''):\n \"\"\"The following script automatically retrieves monthly MODIS16 hdf file from the ntsg website.\n\n :param tiles: Tile number in format h##v##; based on grid from https://modis-land.gsfc.nasa.gov/MODLAND_grid.html\n :param save_path: name of output file name\n :param months: months of interest; defaults to [1,12]\n :param years: years of interest; defaults to [2000,2015]\n :return: saves files in outpath\n \"\"\"\n from bs4 import BeautifulSoup\n if months == '':\n months = [1, 12]\n if years == '':\n years = [2000, 2015]\n mons = [str(i).zfill(2) for i in range(months[0], months[1] + 1)]\n yrs = [str(i) for i in range(years[0], years[1] + 1)]\n for tile in tiles:\n for yr in yrs:\n for m in mons:\n base_url = (\n 'http://files.ntsg.umt.edu/data/NTSG_Products/MOD16/MOD16A2_MONTHLY.MERRA_GMAO_1kmALB/'\n )\n dir_path = 'Y{:}/M{:}/'.format(yr, m)\n url = base_url + dir_path\n soup = BeautifulSoup(urllib2.urlopen(url), 'lxml')\n hdf_name = soup.find_all('', {'href': re.compile(\n 'MOD16A2.A{:}M{:}.{:}.105'.format(yr, m, tile), re.\n IGNORECASE)})\n files = urllib.urlretrieve(url + hdf_name[0].text, \n save_path + hdf_name[0].text)\n print(save_path + hdf_name[0].text)\n time.sleep(0.5)\n\n\ndef get_file_list(save_path, wld='*.105*.hdf'):\n \"\"\"\n\n Args:\n save_path: path to folder where raw MODIS files are\n wld: common wildcard in all of the raw MODIS files\n\n Returns:\n list of files to analyze in the raw folder\n\n \"\"\"\n return glob.glob(os.path.join(save_path, wld))\n\n\ndef reproject_modis(files, save_path, data_type, eight_day=True, proj=102003):\n \"\"\"Iterates through MODIS files in a folder reprojecting them.\n\n Takes the crazy MODIS sinusoidal projection to a user defined projection.\n\n Args:\n files: list of file paths of MODIS hdf files; created using files = glob.glob(os.path.join(save_path, '*.105*.hdf'))\n save_path: folder to store the reprojected files\n data_type: type of MODIS16 data being reprojected; options are 'ET','PET','LE', and 'PLE'\n eight_day: time span of modis file; Bool where default is true (input 8-day rasters)\n proj: projection of output data by epsg number; default is nad83 zone 12\n Returns:\n Reprojected MODIS files\n\n ..notes:\n The EPSG code for NAD83 Zone 12 is 26912.\n The EPSG code for Albers Equal Area is 102003\n http://files.ntsg.umt.edu/data/NTSG_Products/MOD16/MOD16_global_evapotranspiration_description.pdf\n https://modis-land.gsfc.nasa.gov/MODLAND_grid.html\n https://lpdaac.usgs.gov/dataset_discovery/modis/modis_products_table/mod16a2_v006<\n https://search.earthdata.nasa.gov/search/granules?p=C1000000524-LPDAAC_ECS&m=36.87890625!-114.50390625!5!1!0!0%2C2&tl=1503517150!4!!&q=MOD16A2+V006&sb=-114.29296875%2C36.80859375%2C-109.96875%2C42.2578125\n \"\"\"\n import pymodis\n datadir = {'ET': '/ET/', 'PET': '/PET/', 'LE': '/LE/', 'PLE': '/PLE/'}\n matrdir = {'ET': [1, 0, 0, 0], 'LE': [0, 1, 0, 0], 'PET': [0, 0, 1, 0],\n 'PLE': [0, 0, 0, 1]}\n if not os.path.exists(save_path + datadir[data_type]):\n os.makedirs(save_path + datadir[data_type])\n print('created {:}'.format(save_path + datadir[data_type]))\n for f in files:\n year = f.split('\\\\')[1].split('.')[1][1:5]\n v = f.split('\\\\')[1].split('.')[2][-2:]\n h = f.split('\\\\')[1].split('.')[2][1:3]\n if eight_day:\n doy = f.split('\\\\')[1].split('.')[1][-3:]\n fname = 'A' + year + 'D' + doy + 'h' + h + 'v' + v\n pref = os.path.join(save_path + datadir[data_type] + fname)\n else:\n month = 
f.split('\\\\')[1].split('.')[1][-2:]\n fname = 'A' + year + 'M' + month + 'h' + h + 'v' + v\n pref = os.path.join(save_path + datadir[data_type] + fname)\n convertsingle = pymodis.convertmodis_gdal.convertModisGDAL(hdfname=\n f, prefix=pref, subset=matrdir[data_type], res=1000, epsg=proj)\n try:\n convertsingle.run()\n except:\n print(fname + ' failed!')\n pass\n\n\ndef clip_and_fix(path, outpath, data_type, area=''):\n \"\"\"Clips raster to Utah's Watersheds and makes exception values null.\n\n Args:\n path: folder of the reprojected MODIS files\n outpath: ESRI gdb to store the clipped files\n data_type: type of MODIS16 data being reprojected; options are 'ET','PET','LE', and 'PLE'\n area: path to polygon used to clip tiles\n\n \"\"\"\n arcpy.CheckOutExtension('Spatial')\n arcpy.env.workspace = path\n arcpy.env.overwriteOutput = True\n if area == '':\n area = 'H:/GIS/Calc.gdb/WBD_UT'\n arcpy.env.mask = area\n arcpy.CheckOutExtension('spatial')\n for rast in arcpy.ListRasters():\n calc = SetNull(arcpy.Raster(rast) > 32700, arcpy.Raster(rast))\n calc.save(outpath + data_type + rast[1:5] + rast[6:8] + 'h' + rast[\n 10:11] + 'v' + rast[13:14])\n print(outpath + data_type + rast[1:5] + rast[6:8] + 'h' + rast[10:\n 11] + 'v' + rast[13:14])\n\n\ndef merge_rasts(path, data_type='ET', monthRange='', yearRange='', outpath=''):\n \"\"\"Mosaics (merges) different MODIS cells into one layer.\n\n\n \"\"\"\n if monthRange == '':\n monthRange = [1, 12]\n if yearRange == '':\n yearRange = [2000, 2015]\n if outpath == '':\n outpath = path\n arcpy.env.workspace = path\n outCS = arcpy.SpatialReference('NAD 1983 UTM Zone 12N')\n for y in range(yearRange[0], yearRange[-1] + 1):\n for m in range(monthRange[0], monthRange[-1] + 1):\n nm = data_type + str(y) + str(m).zfill(2)\n rlist = []\n for rast in arcpy.ListRasters(nm + '*'):\n rlist.append(rast)\n try:\n arcpy.MosaicToNewRaster_management(rlist, outpath, nm + 'c',\n outCS, '16_BIT_UNSIGNED', '1000', '1', 'LAST', 'LAST')\n print(path + nm + 'c')\n except:\n print(nm + ' failed!')\n pass\n\n\ndef scale_modis(path, out_path, scaleby=10000.0, data_type='ET', monthRange\n =[1, 12], yearRange=[2000, 2014]):\n \"\"\"\n\n :param path: directory to unconverted modis tiles\n :param out_path: directory to put output in\n :param scaleby: scaling factor for MODIS data; default converts to meters/month\n :param data_type: type of MODIS16 data being scaled; used for file name; options are 'ET','PET','LE', and 'PLE'\n :param monthRange: range of months to process data\n :param yearRange: range of years to process data\n :return:\n \"\"\"\n arcpy.CheckOutExtension('spatial')\n for y in range(yearRange[0], yearRange[-1] + 1):\n for m in range(monthRange[0], monthRange[-1] + 1):\n nm = data_type + str(y) + str(m).zfill(2)\n calc = Divide(nm + 'c', scaleby)\n calc.save(out_path + nm)\n\n\ndef untar(filepath, outfoldername='.', compression='r', deletesource=False):\n \"\"\"\n Given an input tar archive filepath, extracts the files.\n Required: filepath -- the path to the tar archive\n Optional: outfoldername -- the output directory for the files; DEFAULT is directory with tar archive\n compression -- the type of compression used in the archive; DEFAULT is 'r'; use \"r:gz\" for gzipped archives\n deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is false\n Output: filelist -- the list of all extract files\n \"\"\"\n import tarfile\n with tarfile.open(filepath, compression) as tfile:\n filelist = tfile.getnames()\n 
tfile.extractall(path=outfoldername)\n if deletesource:\n try:\n os.remove(filepath)\n except:\n raise Exception('Could not delete tar archive {0}.'.format(\n filepath))\n return filelist\n\n\ndef ungz(filepath, compression='rb', deletesource=False):\n \"\"\"\n Given an input gz archive filepath, extracts the files.\n Required: filepath -- the path to the tar archive\n Optional: outfoldername -- the output directory for the files; DEFAULT is directory with tar archive\n compression -- the type of compression used in the archive; DEFAULT is 'r'; use \"r:gz\" for gzipped archives\n deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is false\n Output: filelist -- the list of all extract files\n \"\"\"\n import gzip\n with gzip.open(filepath, compression) as f:\n outF = open(filepath[:-3], 'wb')\n outF.write(f.read())\n f.close()\n outF.close()\n if deletesource:\n try:\n os.remove(filepath)\n except:\n raise Exception('Could not delete gz archive {0}.'.format(filepath)\n )\n return filepath[:-3]\n\n\ndef replace_hdr_file(hdrfile):\n \"\"\"\n Replace the .hdr file for a .bil raster with the correct data for Arc processing\n Required: hdrfile -- filepath for .hdr file to replace/create\n Output: None\n \"\"\"\n HDRFILE_STRING = \"\"\"byteorder M\nlayout bil\nnbands 1\nnbits 16\nncols 6935\nnrows 3351\n ulxmap -124.729583333331703\nulymap 52.871249516804028\nxdim 0.00833333333\nydim 0.00833333333\n\"\"\"\n with open(hdrfile, 'w') as o:\n o.write(HDRFILE_STRING)\n\n\ndef get_snodas(out_dir, months='', years=''):\n \"\"\"Downloads daily SNODAS data from ftp. This is slow.\n\n :param out_dir: directory to store downloaded SNODAS zip files\n :param months: months desired for download\n :param years: years desired for download\n :return: saved zip files in out_dir\n\n .. 
note:\n Use polaris: http://nsidc.org/data/polaris/\n \"\"\"\n import ftplib\n if months == '':\n months = [1, 12]\n if years == '':\n years = [2000, 2015]\n monnames = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug',\n 'Sep', 'Oct', 'Nov', 'Dec']\n mons = [(str(i).zfill(2) + '_' + monnames[i - 1]) for i in range(months\n [0], months[1] + 1)]\n yrs = [str(i) for i in range(years[0], years[1] + 1)]\n for yr in yrs:\n for m in mons:\n ftp_addr = 'sidads.colorado.edu'\n ftp = ftplib.FTP(ftp_addr)\n ftp.login()\n dir_path = 'pub/DATASETS/NOAA/G02158/masked/' + yr + '/' + m + '/'\n ftp.cwd(dir_path)\n files = ftp.nlst()\n for f in files:\n if len(f) > 4:\n save_file = open(out_dir + '/' + f, 'wb')\n ftp.retrbinary('RETR ' + f, save_file.write)\n save_file.close()\n print(f)\n ftp.close()\n\n\ndef rename_polaris_snodas(path):\n prodcode = {'us_ssmv11038wS__A': 'SPAT', 'us_ssmv11044bS__T': 'SNML',\n 'us_ssmv11050lL00T': 'SPSB', 'us_ssmv11034tS__T': 'SWEQ',\n 'us_ssmv01025SlL00': 'RAIN', 'us_ssmv01025SlL01': 'SNOW',\n 'us_ssmv11036tS__T': 'SNOD', 'us_ssmv11039lL00T': 'BSSB'}\n for filename in os.listdir(path):\n if filename.startswith('us_ssmv'):\n code = prodcode[filename[0:17]]\n yrsrt = filename.find('TNATS') + 5\n yr = filename[yrsrt:yrsrt + 4]\n mo = filename[yrsrt + 4:yrsrt + 6]\n dy = filename[yrsrt + 6:yrsrt + 8]\n try:\n os.rename(os.path.join(path, filename), os.path.join(path, \n code + yr + mo + dy + filename[-4:]))\n except:\n pass\n\n\ndef snow_summary(code, scalingFactor, statistics='SUM', outcellsize='1000',\n monthRange='', yearRange='', path='H:/GIS/SNODAS/SNWDS/', outpath=\n 'H:/GIS/SNODAS.gdb/', area=''):\n \"\"\"\n summarizes daily SNODAS data to monthly values\n\n INPUT\n -----\n code = text; prefix of dataset to use; choices are 'RAIN','SWEQ','SNOD','SPAT','BSSB','SNML', or 'SPSB'\n scalingFactor = float; table 1 at http://nsidc.org/data/docs/noaa/g02158_snodas_snow_cover_model/\n statistics = text; from arcpy sa CellStatistics; choices are MEAN, MAJORITY, MAXIMUM, MEDIAN, MINIMUM, MINORITY,\n RANGE, STD, SUM, or VARIETY\n monthRange = len 2 list; begin and end month of data you wish to analyze\n yearRange = len 2 list; bengin and end year of data you wish to analyze\n path = directory where raw geoTiffs are located\n outpath = directory where final data will be stored\n\n OUTPUT\n ------\n projected and scaled monthly rasters\n\n \"\"\"\n if monthRange == '':\n months = [1, 12]\n if yearRange == '':\n years = [2000, 2015]\n g = {}\n arcpy.env.workspace = path\n arcpy.env.overwriteOutput = True\n if area == '':\n area = 'H:/GIS/Calc.gdb/WBD_UT'\n statstype = {'MEAN': 'AVG', 'MAJORITY': 'MAJ', 'MAXIMUM': 'MAX',\n 'MEDIAN': 'MED', 'MINIMUM': 'MIN', 'MINORITY': 'MNR', 'RANGE':\n 'RNG', 'STD': 'STD', 'SUM': 'SUM', 'VARIETY': 'VAR'}\n for y in range(yearRange[0], yearRange[1] + 1):\n for m in range(monthRange[0], monthRange[1] + 1):\n g[code + str(y) + str(m).zfill(2)] = []\n for name in sorted(glob.glob(path + code + '*.tif')):\n rast = os.path.basename(name)\n if rast[0:4] == code and int(rast[4:8]) == y and int(rast[8:10]\n ) == m:\n g[code + str(y) + str(m).zfill(2)].append(rast)\n else:\n pass\n if len(g[code + str(y) + str(m).zfill(2)]) > 0:\n cellstats = CellStatistics(g[code + str(y) + str(m).zfill(2\n )], statistics_type=statistics, ignore_nodata='DATA')\n div = Divide(cellstats, scalingFactor)\n calc = Con(div < 0.0, 0.0, div)\n ifnull = Con(IsNull(calc), 0, calc)\n outCS = arcpy.SpatialReference(102039)\n outnm = outpath + rast[0:4] + str(y).zfill(2) + 
str(m).zfill(2\n ) + statstype[statistics]\n memoryFeature = 'in_memory/myMemoryFeature'\n arcpy.ProjectRaster_management(ifnull, memoryFeature, outCS,\n 'BILINEAR', outcellsize,\n 'WGS_1984_(ITRF00)_To_NAD_1983', '#', '#')\n extrc = arcpy.sa.ExtractByMask(memoryFeature, area)\n extrc.save(outnm)\n print(outnm)\n arcpy.Delete_management('in_memory')\n\n\ndef totalavg(code, statistics='MEAN', monthRange=[1, 12], yearRange=[2003, \n 2016], path='H:/GIS/SNODAS/SNODASproj.gdb/', outpath=\n 'H:/GIS/SNODAS/SNODASproj.gdb/'):\n \"\"\"Summarizes daily raster data into monthly data.\n\n INPUT\n -----\n code = string with four letters represting data type to summarize (example 'BSSB')\n statistics = how data will be summarized; defaults to monthly averages; options are\n ['MEAN','MAJORITY','MAXIMUM','MEDIAN','MINIMUM','MINORITY','RANGE','STD','SUM','VARIETY']\n Most common are 'MEAN','MEDIAN', and 'SUM'\n These are inputs that will be used in the ArcPy CellStatistics function.\n See http://pro.arcgis.com/en/pro-app/tool-reference/spatial-analyst/cell-statistics.htm for documentation\n monthRange = beginning and end months of summary statistics\n yearRange = beginning and end years of summary statistics\n path = location of geodatabase of data to summarize\n outpath = location of geodatabase where output data should be stored\n OUTPUT\n ------\n summary raster(s) stored in outpath\n\n \"\"\"\n g = {}\n statstype = {'MEAN': 'AVG', 'MAJORITY': 'MAJ', 'MAXIMUM': 'MAX',\n 'MEDIAN': 'MED', 'MINIMUM': 'MIN', 'MINORITY': 'MNR', 'RANGE':\n 'RNG', 'STD': 'STD', 'SUM': 'SUM', 'VARIETY': 'VAR'}\n arcpy.env.workspace = path\n arcpy.env.overwriteOutput = True\n for m in range(monthRange[0], monthRange[1] + 1):\n g[code + '0000' + str(m).zfill(2)] = []\n for rast in arcpy.ListRasters():\n yrrng = range(yearRange[0], yearRange[1] + 1)\n if rast[0:4] == code and int(rast[4:8]) in yrrng and int(rast[8:10]\n ) == m:\n g[code + '0000' + str(m).zfill(2)].append(rast)\n else:\n pass\n if len(g[code + '0000' + str(m).zfill(2)]) > 0:\n calc = CellStatistics(g[code + '0000' + str(m).zfill(2)],\n statistics_type=statistics, ignore_nodata='DATA')\n calc.save(code + '0000' + str(m).zfill(2) + statstype[statistics])\n print(code + '0000' + str(m).zfill(2) + statstype[statistics])\n\n\n<mask token>\n",
"step-5": "\"\"\"\nThese are data input download and prep scripts. They download and massage the data for the UBM calculations (calc.py)\n\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport time\nimport urllib\ntry:\n # For Python 3.0 and later\n import urllib.request\nexcept ImportError:\n # Fall back to Python 2's urllib2\n import urllib2\n\nimport re\nimport glob\nimport os\nimport arcpy\nfrom arcpy.sa import *\n\n\ndef get_modis(tiles, save_path, months='', years=''):\n \"\"\"The following script automatically retrieves monthly MODIS16 hdf file from the ntsg website.\n\n :param tiles: Tile number in format h##v##; based on grid from https://modis-land.gsfc.nasa.gov/MODLAND_grid.html\n :param save_path: name of output file name\n :param months: months of interest; defaults to [1,12]\n :param years: years of interest; defaults to [2000,2015]\n :return: saves files in outpath\n \"\"\"\n\n\n from bs4 import BeautifulSoup\n if months == '':\n months = [1, 12]\n if years == '':\n years = [2000, 2015]\n\n mons = [str(i).zfill(2) for i in range(months[0], months[1] + 1)]\n yrs = [str(i) for i in range(years[0], years[1] + 1)]\n\n for tile in tiles:\n for yr in yrs:\n for m in mons:\n base_url = \"http://files.ntsg.umt.edu/data/NTSG_Products/MOD16/MOD16A2_MONTHLY.MERRA_GMAO_1kmALB/\"\n\n dir_path = \"Y{:}/M{:}/\".format(yr, m)\n url = base_url + dir_path\n soup = BeautifulSoup(urllib2.urlopen(url), \"lxml\")\n hdf_name = soup.find_all('', {\n 'href': re.compile('MOD16A2.A{:}M{:}.{:}.105'.format(yr, m, tile), re.IGNORECASE)})\n files = urllib.urlretrieve(url + hdf_name[0].text, save_path + hdf_name[0].text)\n print(save_path + hdf_name[0].text)\n time.sleep(0.5)\n\n\ndef get_file_list(save_path, wld='*.105*.hdf'):\n \"\"\"\n\n Args:\n save_path: path to folder where raw MODIS files are\n wld: common wildcard in all of the raw MODIS files\n\n Returns:\n list of files to analyze in the raw folder\n\n \"\"\"\n return glob.glob(os.path.join(save_path, wld))\n\n\ndef reproject_modis(files, save_path, data_type, eight_day=True, proj=102003):\n \"\"\"Iterates through MODIS files in a folder reprojecting them.\n\n Takes the crazy MODIS sinusoidal projection to a user defined projection.\n\n Args:\n files: list of file paths of MODIS hdf files; created using files = glob.glob(os.path.join(save_path, '*.105*.hdf'))\n save_path: folder to store the reprojected files\n data_type: type of MODIS16 data being reprojected; options are 'ET','PET','LE', and 'PLE'\n eight_day: time span of modis file; Bool where default is true (input 8-day rasters)\n proj: projection of output data by epsg number; default is nad83 zone 12\n Returns:\n Reprojected MODIS files\n\n ..notes:\n The EPSG code for NAD83 Zone 12 is 26912.\n The EPSG code for Albers Equal Area is 102003\n http://files.ntsg.umt.edu/data/NTSG_Products/MOD16/MOD16_global_evapotranspiration_description.pdf\n https://modis-land.gsfc.nasa.gov/MODLAND_grid.html\n https://lpdaac.usgs.gov/dataset_discovery/modis/modis_products_table/mod16a2_v006<\n https://search.earthdata.nasa.gov/search/granules?p=C1000000524-LPDAAC_ECS&m=36.87890625!-114.50390625!5!1!0!0%2C2&tl=1503517150!4!!&q=MOD16A2+V006&sb=-114.29296875%2C36.80859375%2C-109.96875%2C42.2578125\n \"\"\"\n import pymodis\n # dictionary to designate a directory\n datadir = {'ET': '/ET/', 'PET': '/PET/', 'LE': '/LE/', 'PLE': '/PLE/'}\n # dictionary to select layer from hdf file that contains the datatype\n matrdir = {'ET': [1, 0, 0, 0], 'LE': [0, 1, 0, 0], 'PET': 
[0, 0, 1, 0], 'PLE': [0, 0, 0, 1]}\n\n # check for file folder and make it if it doesn't exist\n if not os.path.exists(save_path + datadir[data_type]):\n os.makedirs(save_path + datadir[data_type])\n print('created {:}'.format(save_path + datadir[data_type]))\n\n for f in files:\n year = f.split('\\\\')[1].split('.')[1][1:5]\n\n v = f.split('\\\\')[1].split('.')[2][-2:] # parse v (cell coordinate) from hdf filename\n h = f.split('\\\\')[1].split('.')[2][1:3] # parse h (cell coordinate) from hdf filename\n\n # names file based on time span of input rasters; 8-day by default\n if eight_day:\n doy = f.split('\\\\')[1].split('.')[1][-3:] # parse day of year from hdf filename\n fname = 'A' + year + 'D' + doy + 'h' + h + 'v' + v\n pref = os.path.join(save_path + datadir[data_type] + fname)\n else:\n month = f.split('\\\\')[1].split('.')[1][-2:] # parse month from hdf filename\n fname = 'A' + year + 'M' + month + 'h' + h + 'v' + v\n pref = os.path.join(save_path + datadir[data_type] + fname)\n\n convertsingle = pymodis.convertmodis_gdal.convertModisGDAL(hdfname=f, prefix=pref,\n subset=matrdir[data_type],\n res=1000, epsg=proj)\n # [ET,LE,PET,PLE]\n try:\n convertsingle.run()\n except:\n print(fname + ' failed!')\n pass\n\n\ndef clip_and_fix(path, outpath, data_type, area=''):\n \"\"\"Clips raster to Utah's Watersheds and makes exception values null.\n\n Args:\n path: folder of the reprojected MODIS files\n outpath: ESRI gdb to store the clipped files\n data_type: type of MODIS16 data being reprojected; options are 'ET','PET','LE', and 'PLE'\n area: path to polygon used to clip tiles\n\n \"\"\"\n # Check out the ArcGIS Spatial Analyst extension license\n arcpy.CheckOutExtension(\"Spatial\")\n\n arcpy.env.workspace = path\n arcpy.env.overwriteOutput = True\n\n if area == '':\n area = 'H:/GIS/Calc.gdb/WBD_UT'\n\n arcpy.env.mask = area\n arcpy.CheckOutExtension(\"spatial\")\n for rast in arcpy.ListRasters():\n calc = SetNull(arcpy.Raster(rast) > 32700, arcpy.Raster(rast))\n calc.save(outpath + data_type + rast[1:5] + rast[6:8] + 'h' + rast[10:11] + 'v' + rast[13:14])\n print(outpath + data_type + rast[1:5] + rast[6:8] + 'h' + rast[10:11] + 'v' + rast[13:14])\n\n\ndef merge_rasts(path, data_type='ET', monthRange='', yearRange='', outpath=''):\n \"\"\"Mosaics (merges) different MODIS cells into one layer.\n\n\n \"\"\"\n if monthRange == '':\n monthRange = [1, 12]\n if yearRange == '':\n yearRange = [2000, 2015]\n if outpath == '':\n outpath = path\n\n arcpy.env.workspace = path\n outCS = arcpy.SpatialReference('NAD 1983 UTM Zone 12N')\n for y in range(yearRange[0], yearRange[-1] + 1): # set years converted here\n for m in range(monthRange[0], monthRange[-1] + 1): # set months converted here\n nm = data_type + str(y) + str(m).zfill(2)\n rlist = []\n for rast in arcpy.ListRasters(nm + '*'):\n rlist.append(rast)\n try:\n arcpy.MosaicToNewRaster_management(rlist, outpath, nm + 'c', outCS, \\\n \"16_BIT_UNSIGNED\", \"1000\", \"1\", \"LAST\", \"LAST\")\n\n print(path + nm + 'c')\n except:\n print(nm + ' failed!')\n pass\n\n\ndef scale_modis(path, out_path, scaleby=10000.0, data_type='ET', monthRange=[1, 12], yearRange=[2000, 2014]):\n \"\"\"\n\n :param path: directory to unconverted modis tiles\n :param out_path: directory to put output in\n :param scaleby: scaling factor for MODIS data; default converts to meters/month\n :param data_type: type of MODIS16 data being scaled; used for file name; options are 'ET','PET','LE', and 'PLE'\n :param monthRange: range of months to process data\n :param yearRange: 
range of years to process data\n :return:\n \"\"\"\n arcpy.CheckOutExtension(\"spatial\")\n\n for y in range(yearRange[0], yearRange[-1] + 1): # set years converted here\n for m in range(monthRange[0], monthRange[-1] + 1): # set months converted here\n nm = data_type + str(y) + str(m).zfill(2)\n calc = Divide(nm + 'c', scaleby)\n calc.save(out_path + nm)\n\n\ndef untar(filepath, outfoldername='.', compression='r', deletesource=False):\n \"\"\"\n Given an input tar archive filepath, extracts the files.\n Required: filepath -- the path to the tar archive\n Optional: outfoldername -- the output directory for the files; DEFAULT is directory with tar archive\n compression -- the type of compression used in the archive; DEFAULT is 'r'; use \"r:gz\" for gzipped archives\n deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is false\n Output: filelist -- the list of all extract files\n \"\"\"\n import tarfile\n\n with tarfile.open(filepath, compression) as tfile:\n filelist = tfile.getnames()\n tfile.extractall(path=outfoldername)\n\n if deletesource:\n try:\n os.remove(filepath)\n except:\n raise Exception(\"Could not delete tar archive {0}.\".format(filepath))\n\n return filelist\n\n\ndef ungz(filepath, compression='rb', deletesource=False):\n \"\"\"\n Given an input gz archive filepath, extracts the files.\n Required: filepath -- the path to the tar archive\n Optional: outfoldername -- the output directory for the files; DEFAULT is directory with tar archive\n compression -- the type of compression used in the archive; DEFAULT is 'r'; use \"r:gz\" for gzipped archives\n deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is false\n Output: filelist -- the list of all extract files\n \"\"\"\n\n import gzip\n\n with gzip.open(filepath, compression) as f:\n outF = open(filepath[:-3], 'wb')\n outF.write(f.read())\n f.close()\n outF.close()\n if deletesource:\n try:\n os.remove(filepath)\n except:\n raise Exception(\"Could not delete gz archive {0}.\".format(filepath))\n\n return filepath[:-3]\n\n\ndef replace_hdr_file(hdrfile):\n \"\"\"\n Replace the .hdr file for a .bil raster with the correct data for Arc processing\n Required: hdrfile -- filepath for .hdr file to replace/create\n Output: None\n \"\"\"\n # hdr file replacment string\n HDRFILE_STRING = \"byteorder M\\nlayout bil\\nnbands 1\\nnbits 16\\nncols 6935\\nnrows 3351\\n\\\n ulxmap -124.729583333331703\\nulymap 52.871249516804028\\nxdim 0.00833333333\\nydim 0.00833333333\\n\"\n with open(hdrfile, 'w') as o:\n o.write(HDRFILE_STRING)\n\n\ndef get_snodas(out_dir, months='', years=''):\n \"\"\"Downloads daily SNODAS data from ftp. This is slow.\n\n :param out_dir: directory to store downloaded SNODAS zip files\n :param months: months desired for download\n :param years: years desired for download\n :return: saved zip files in out_dir\n\n .. 
note:\n Use polaris: http://nsidc.org/data/polaris/\n \"\"\"\n import ftplib\n\n if months == '':\n months = [1, 12]\n if years == '':\n years = [2000, 2015]\n\n monnames = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']\n mons = [str(i).zfill(2) + \"_\" + monnames[i - 1] for i in range(months[0], months[1] + 1)]\n\n yrs = [str(i) for i in range(years[0], years[1] + 1)]\n\n for yr in yrs:\n for m in mons:\n ftp_addr = \"sidads.colorado.edu\"\n ftp = ftplib.FTP(ftp_addr)\n ftp.login()\n\n dir_path = \"pub/DATASETS/NOAA/G02158/masked/\" + yr + \"/\" + m + \"/\"\n ftp.cwd(dir_path)\n files = ftp.nlst()\n\n for f in files:\n if len(f) > 4:\n save_file = open(out_dir + \"/\" + f, 'wb')\n ftp.retrbinary(\"RETR \" + f, save_file.write)\n save_file.close()\n print(f)\n ftp.close()\n\n\ndef rename_polaris_snodas(path):\n prodcode = {'us_ssmv11038wS__A': 'SPAT', 'us_ssmv11044bS__T': 'SNML', 'us_ssmv11050lL00T': 'SPSB',\n 'us_ssmv11034tS__T': 'SWEQ', 'us_ssmv01025SlL00': 'RAIN', 'us_ssmv01025SlL01': 'SNOW',\n 'us_ssmv11036tS__T': 'SNOD', 'us_ssmv11039lL00T': 'BSSB'}\n\n for filename in os.listdir(path):\n if filename.startswith(\"us_ssmv\"):\n code = prodcode[filename[0:17]]\n yrsrt = filename.find('TNATS') + 5\n yr = filename[yrsrt:yrsrt + 4]\n mo = filename[yrsrt + 4:yrsrt + 6]\n dy = filename[yrsrt + 6:yrsrt + 8]\n try:\n os.rename(os.path.join(path, filename), os.path.join(path, code + yr + mo + dy + filename[-4:]))\n except:\n pass\n\n\ndef snow_summary(code, scalingFactor, statistics=\"SUM\", outcellsize='1000', monthRange='', yearRange='',\n path=\"H:/GIS/SNODAS/SNWDS/\", outpath=\"H:/GIS/SNODAS.gdb/\", area=''):\n \"\"\"\n summarizes daily SNODAS data to monthly values\n\n INPUT\n -----\n code = text; prefix of dataset to use; choices are 'RAIN','SWEQ','SNOD','SPAT','BSSB','SNML', or 'SPSB'\n scalingFactor = float; table 1 at http://nsidc.org/data/docs/noaa/g02158_snodas_snow_cover_model/\n statistics = text; from arcpy sa CellStatistics; choices are MEAN, MAJORITY, MAXIMUM, MEDIAN, MINIMUM, MINORITY,\n RANGE, STD, SUM, or VARIETY\n monthRange = len 2 list; begin and end month of data you wish to analyze\n yearRange = len 2 list; bengin and end year of data you wish to analyze\n path = directory where raw geoTiffs are located\n outpath = directory where final data will be stored\n\n OUTPUT\n ------\n projected and scaled monthly rasters\n\n \"\"\"\n if monthRange == '':\n months = [1, 12]\n if yearRange == '':\n years = [2000, 2015]\n\n g = {}\n arcpy.env.workspace = path\n arcpy.env.overwriteOutput = True\n if area == '':\n area = 'H:/GIS/Calc.gdb/WBD_UT'\n # arcpy.env.mask = area\n\n statstype = {'MEAN': 'AVG', 'MAJORITY': 'MAJ', 'MAXIMUM': 'MAX', 'MEDIAN': 'MED', 'MINIMUM': 'MIN',\n 'MINORITY': 'MNR',\n 'RANGE': 'RNG', 'STD': 'STD', 'SUM': 'SUM', 'VARIETY': 'VAR'}\n\n for y in range(yearRange[0], yearRange[1] + 1): # set years converted here\n for m in range(monthRange[0], monthRange[1] + 1): # set months converted here\n g[code + str(y) + str(m).zfill(2)] = [] # this defines the dictionary key based on data type month and year\n for name in sorted(\n glob.glob(path + code + '*.tif')): # pick all tiff files from raw data folder of a data type\n rast = os.path.basename(name)\n if rast[0:4] == code and int(rast[4:8]) == y and int(rast[8:10]) == m:\n g[code + str(y) + str(m).zfill(2)].append(rast) # create a list of rasters for each month\n else:\n pass\n if len(g[code + str(y) + str(m).zfill(2)]) > 0:\n # print(g[code+str(y)+str(m).zfill(2)])\n # ifnull = 
'in_memory/ifnull'\n # arcpy sa functions that summarize the daily data to monthly data\n cellstats = CellStatistics(g[code + str(y) + str(m).zfill(2)], statistics_type=statistics,\n ignore_nodata=\"DATA\")\n div = Divide(cellstats, scalingFactor) # scale factor, converts to kg/m2 10 then to m 0.001\n calc = Con(div < 0.0, 0.0, div) # remove negative and null values\n ifnull = Con(IsNull(calc), 0, calc) # remove null\n # WKID 102039\n outCS = arcpy.SpatialReference(102039) # change coordinate units to m for spatial analysis\n # define save path for file\n outnm = outpath + rast[0:4] + str(y).zfill(2) + str(m).zfill(2) + statstype[statistics]\n memoryFeature = \"in_memory/myMemoryFeature\"\n # memoryFeature = outnm\n arcpy.ProjectRaster_management(ifnull, memoryFeature, outCS, 'BILINEAR', outcellsize,\n 'WGS_1984_(ITRF00)_To_NAD_1983', '#', '#')\n # Execute ExtractByMask to clip snodas data to Utah watersheds\n extrc = arcpy.sa.ExtractByMask(memoryFeature, area)\n extrc.save(outnm)\n print(outnm)\n arcpy.Delete_management(\"in_memory\")\n\n\ndef totalavg(code, statistics=\"MEAN\", monthRange=[1, 12], yearRange=[2003, 2016],\n path=\"H:/GIS/SNODAS/SNODASproj.gdb/\", outpath=\"H:/GIS/SNODAS/SNODASproj.gdb/\"):\n \"\"\"Summarizes daily raster data into monthly data.\n\n INPUT\n -----\n code = string with four letters represting data type to summarize (example 'BSSB')\n statistics = how data will be summarized; defaults to monthly averages; options are\n ['MEAN','MAJORITY','MAXIMUM','MEDIAN','MINIMUM','MINORITY','RANGE','STD','SUM','VARIETY']\n Most common are 'MEAN','MEDIAN', and 'SUM'\n These are inputs that will be used in the ArcPy CellStatistics function.\n See http://pro.arcgis.com/en/pro-app/tool-reference/spatial-analyst/cell-statistics.htm for documentation\n monthRange = beginning and end months of summary statistics\n yearRange = beginning and end years of summary statistics\n path = location of geodatabase of data to summarize\n outpath = location of geodatabase where output data should be stored\n OUTPUT\n ------\n summary raster(s) stored in outpath\n\n \"\"\"\n g = {}\n statstype = {'MEAN': 'AVG', 'MAJORITY': 'MAJ', 'MAXIMUM': 'MAX', 'MEDIAN': 'MED', 'MINIMUM': 'MIN',\n 'MINORITY': 'MNR',\n 'RANGE': 'RNG', 'STD': 'STD', 'SUM': 'SUM', 'VARIETY': 'VAR'}\n arcpy.env.workspace = path\n arcpy.env.overwriteOutput = True\n\n # iterate over month range set here; default is 1 to 12 (Jan to Dec)\n for m in range(monthRange[0], monthRange[1] + 1):\n\n # this defines the dictionary key based on data type, month, and year\n g[code + '0000' + str(m).zfill(2)] = []\n\n # pick all tiff files from raw data folder of a data type\n for rast in arcpy.ListRasters():\n yrrng = range(yearRange[0], yearRange[1] + 1) # set years converted here\n\n # create a list of rasters with the right code and month and year\n if rast[0:4] == code and int(rast[4:8]) in yrrng and int(rast[8:10]) == m:\n g[code + '0000' + str(m).zfill(2)].append(rast) # create a list of rasters for each month\n else:\n pass\n if len(g[code + '0000' + str(m).zfill(2)]) > 0:\n # arcpy sa functions that summarize the daily data to monthly data\n calc = CellStatistics(g[code + '0000' + str(m).zfill(2)], statistics_type=statistics, ignore_nodata=\"DATA\")\n calc.save(code + '0000' + str(m).zfill(2) + statstype[statistics])\n print(code + '0000' + str(m).zfill(2) + statstype[statistics])\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
6,
11,
12,
13,
16
]
}
|
[
6,
11,
12,
13,
16
] |
"""
题目描述
HZ偶尔会拿些专业问题来忽悠那些非计算机专业的同学。
今天测试组开完会后,他又发话了:在古老的一维模式识别中,
常常需要计算连续子向量的最大和,当向量全为正数的时候,问题很好解决。
但是,如果向量中包含负数,是否应该包含某个负数,并期望旁边的正数会弥补它呢?
例如:{6,-3,-2,7,-15,1,2,2},连续子向量的最大和为8(从第0个开始,到第3个为止)。
给一个数组,返回它的最大连续子序列的和,你会不会被他忽悠住?(子向量的长度至少是1)
"""
# -*- coding:utf-8 -*-
class Solution:
def FindGreatestSumOfSubArray(self, array):
# write code here
        # dp[i] = max sum of a subarray ending at index i: max(array[i], dp[i-1] + array[i])
dp = [array[0]]
res = array[0]
for i in range(1, len(array)):
temp = max(dp[i-1]+array[i], array[i])
dp.append(temp)
if temp > res:
res = temp
return res
s = Solution()
print(s.FindGreatestSumOfSubArray([6,-3,-2,7,-15,1,2,2]))
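
# Worked example of the recurrence dp[i] = max(array[i], dp[i-1] + array[i]) on the
# sample input [6,-3,-2,7,-15,1,2,2]: dp = [6, 3, 1, 8, -7, 1, 3, 5], so the answer is 8
# (elements 0 through 3). The assert below simply re-checks the printed result.
assert s.FindGreatestSumOfSubArray([6, -3, -2, 7, -15, 1, 2, 2]) == 8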
|
normal
|
{
"blob_id": "fcca845b60b050fa5dd0a3c50b3c36c154022f07",
"index": 1467,
"step-1": "<mask token>\n\n\nclass Solution:\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n\n def FindGreatestSumOfSubArray(self, array):\n dp = [array[0]]\n res = array[0]\n for i in range(1, len(array)):\n temp = max(dp[i - 1] + array[i], array[i])\n dp.append(temp)\n if temp > res:\n res = temp\n return res\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def FindGreatestSumOfSubArray(self, array):\n dp = [array[0]]\n res = array[0]\n for i in range(1, len(array)):\n temp = max(dp[i - 1] + array[i], array[i])\n dp.append(temp)\n if temp > res:\n res = temp\n return res\n\n\n<mask token>\nprint(s.FindGreatestSumOfSubArray([6, -3, -2, 7, -15, 1, 2, 2]))\n",
"step-4": "<mask token>\n\n\nclass Solution:\n\n def FindGreatestSumOfSubArray(self, array):\n dp = [array[0]]\n res = array[0]\n for i in range(1, len(array)):\n temp = max(dp[i - 1] + array[i], array[i])\n dp.append(temp)\n if temp > res:\n res = temp\n return res\n\n\ns = Solution()\nprint(s.FindGreatestSumOfSubArray([6, -3, -2, 7, -15, 1, 2, 2]))\n",
"step-5": "\"\"\"\n题目描述\nHZ偶尔会拿些专业问题来忽悠那些非计算机专业的同学。\n今天测试组开完会后,他又发话了:在古老的一维模式识别中,\n常常需要计算连续子向量的最大和,当向量全为正数的时候,问题很好解决。\n但是,如果向量中包含负数,是否应该包含某个负数,并期望旁边的正数会弥补它呢?\n例如:{6,-3,-2,7,-15,1,2,2},连续子向量的最大和为8(从第0个开始,到第3个为止)。\n给一个数组,返回它的最大连续子序列的和,你会不会被他忽悠住?(子向量的长度至少是1)\n\"\"\"\n# -*- coding:utf-8 -*-\nclass Solution:\n def FindGreatestSumOfSubArray(self, array):\n # write code here\n # 以i结尾的数组长度,max(array[i], dp[i-1]+array[i])\n dp = [array[0]]\n res = array[0]\n for i in range(1, len(array)):\n temp = max(dp[i-1]+array[i], array[i])\n dp.append(temp)\n if temp > res:\n res = temp\n return res\n\ns = Solution()\nprint(s.FindGreatestSumOfSubArray([6,-3,-2,7,-15,1,2,2]))",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
"""Splits the google speech commands into train, validation and test sets.
"""
import os
import shutil
import argparse
def move_files(src_folder, to_folder, list_file):
with open(list_file) as f:
for line in f.readlines():
line = line.rstrip()
dirname = os.path.dirname(line)
dest = os.path.join(to_folder, dirname)
if not os.path.exists(dest):
os.mkdir(dest)
shutil.move(os.path.join(src_folder, line), dest)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Split google commands train dataset.')
    parser.add_argument('root', type=str, help='the path to the root folder of the google commands train dataset.')
args = parser.parse_args()
audio_folder = os.path.join(args.root, 'audio')
validation_path = os.path.join(audio_folder, 'validation_list.txt')
test_path = os.path.join(audio_folder, 'testing_list.txt')
valid_folder = os.path.join(args.root, 'valid')
test_folder = os.path.join(args.root, 'test')
train_folder = os.path.join(args.root, 'train')
os.mkdir(valid_folder)
os.mkdir(test_folder)
move_files(audio_folder, test_folder, test_path)
move_files(audio_folder, valid_folder, validation_path)
os.rename(audio_folder, train_folder)
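
# Example invocation (script and path names here are illustrative):
#
#   python split_google_commands.py /data/speech_commands
#
# where /data/speech_commands/audio/ holds the extracted dataset together with
# validation_list.txt and testing_list.txt. The listed files are moved into
# valid/ and test/, and the audio/ folder that remains is renamed to train/.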
|
normal
|
{
"blob_id": "6b2fc94d9a53b8f669cab5e1fb625dd01e20ba98",
"index": 664,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef move_files(src_folder, to_folder, list_file):\n with open(list_file) as f:\n for line in f.readlines():\n line = line.rstrip()\n dirname = os.path.dirname(line)\n dest = os.path.join(to_folder, dirname)\n if not os.path.exists(dest):\n os.mkdir(dest)\n shutil.move(os.path.join(src_folder, line), dest)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef move_files(src_folder, to_folder, list_file):\n with open(list_file) as f:\n for line in f.readlines():\n line = line.rstrip()\n dirname = os.path.dirname(line)\n dest = os.path.join(to_folder, dirname)\n if not os.path.exists(dest):\n os.mkdir(dest)\n shutil.move(os.path.join(src_folder, line), dest)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\n 'Split google commands train dataset.')\n parser.add_argument('root', type=str, help=\n 'the path to the root folder of te google commands train dataset.')\n args = parser.parse_args()\n audio_folder = os.path.join(args.root, 'audio')\n validation_path = os.path.join(audio_folder, 'validation_list.txt')\n test_path = os.path.join(audio_folder, 'testing_list.txt')\n valid_folder = os.path.join(args.root, 'valid')\n test_folder = os.path.join(args.root, 'test')\n train_folder = os.path.join(args.root, 'train')\n os.mkdir(valid_folder)\n os.mkdir(test_folder)\n move_files(audio_folder, test_folder, test_path)\n move_files(audio_folder, valid_folder, validation_path)\n os.rename(audio_folder, train_folder)\n",
"step-4": "<mask token>\nimport os\nimport shutil\nimport argparse\n\n\ndef move_files(src_folder, to_folder, list_file):\n with open(list_file) as f:\n for line in f.readlines():\n line = line.rstrip()\n dirname = os.path.dirname(line)\n dest = os.path.join(to_folder, dirname)\n if not os.path.exists(dest):\n os.mkdir(dest)\n shutil.move(os.path.join(src_folder, line), dest)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\n 'Split google commands train dataset.')\n parser.add_argument('root', type=str, help=\n 'the path to the root folder of te google commands train dataset.')\n args = parser.parse_args()\n audio_folder = os.path.join(args.root, 'audio')\n validation_path = os.path.join(audio_folder, 'validation_list.txt')\n test_path = os.path.join(audio_folder, 'testing_list.txt')\n valid_folder = os.path.join(args.root, 'valid')\n test_folder = os.path.join(args.root, 'test')\n train_folder = os.path.join(args.root, 'train')\n os.mkdir(valid_folder)\n os.mkdir(test_folder)\n move_files(audio_folder, test_folder, test_path)\n move_files(audio_folder, valid_folder, validation_path)\n os.rename(audio_folder, train_folder)\n",
"step-5": "\"\"\"Splits the google speech commands into train, validation and test sets.\n\"\"\"\n\nimport os\nimport shutil\nimport argparse\n\ndef move_files(src_folder, to_folder, list_file):\n with open(list_file) as f:\n for line in f.readlines():\n line = line.rstrip()\n dirname = os.path.dirname(line)\n dest = os.path.join(to_folder, dirname)\n if not os.path.exists(dest):\n os.mkdir(dest)\n shutil.move(os.path.join(src_folder, line), dest)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Split google commands train dataset.')\n parser.add_argument('root', type=str, help='the path to the root folder of te google commands train dataset.')\n args = parser.parse_args()\n\n audio_folder = os.path.join(args.root, 'audio')\n validation_path = os.path.join(audio_folder, 'validation_list.txt')\n test_path = os.path.join(audio_folder, 'testing_list.txt')\n\n valid_folder = os.path.join(args.root, 'valid')\n test_folder = os.path.join(args.root, 'test')\n train_folder = os.path.join(args.root, 'train')\n os.mkdir(valid_folder)\n os.mkdir(test_folder)\n\n move_files(audio_folder, test_folder, test_path)\n move_files(audio_folder, valid_folder, validation_path)\n os.rename(audio_folder, train_folder)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
Package for django_static_template.
"""
|
normal
|
{
"blob_id": "818623621b609d67f8f657be4ade6e3bb86a0bc5",
"index": 4226,
"step-1": "<mask token>\n",
"step-2": "\"\"\"\r\nPackage for django_static_template.\r\n\"\"\"\r\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def check(data):
global ss
global s
for line in data:
s += int(line)
if ss.get(s, False):
return s
ss[s] = True
return None
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('./01-data.txt') as f:
data = f.read().splitlines()
<|reserved_special_token_0|>
def check(data):
global ss
global s
for line in data:
s += int(line)
if ss.get(s, False):
return s
ss[s] = True
return None
<|reserved_special_token_0|>
print('after first pass:', s)
while v is None:
v = check(data)
print('first duplicate:', v)
<|reserved_special_token_1|>
data = None
with open('./01-data.txt') as f:
data = f.read().splitlines()
ss = {}
s = 0
ss[s] = True
def check(data):
global ss
global s
for line in data:
s += int(line)
if ss.get(s, False):
return s
ss[s] = True
return None
v = check(data)
print('after first pass:', s)
while v is None:
v = check(data)
print('first duplicate:', v)
<|reserved_special_token_1|>
#!/usr/bin/env python3
data = None
with open('./01-data.txt') as f:
data = f.read().splitlines()
ss = {}
s = 0
ss[s] = True
def check(data):
global ss
global s
for line in data:
s += int(line)
if ss.get(s, False):
return s
ss[s] = True
return None
v = check(data)
print('after first pass:', s)
while v is None:
v = check(data)
print('first duplicate:', v)
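
# Example, assuming 01-data.txt held the four lines "+1", "-2", "+3", "+1":
# the running sums of the first pass are 1, -1, 2, 3 (printed as "after first pass: 3");
# the second pass continues 4, 2, ... and 2 has already been seen, so the script
# would print "first duplicate: 2".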
|
flexible
|
{
"blob_id": "7e1dd242c60ee12dfc4130e379fa35ae626a4d63",
"index": 5217,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef check(data):\n global ss\n global s\n for line in data:\n s += int(line)\n if ss.get(s, False):\n return s\n ss[s] = True\n return None\n\n\n<mask token>\n",
"step-3": "<mask token>\nwith open('./01-data.txt') as f:\n data = f.read().splitlines()\n<mask token>\n\n\ndef check(data):\n global ss\n global s\n for line in data:\n s += int(line)\n if ss.get(s, False):\n return s\n ss[s] = True\n return None\n\n\n<mask token>\nprint('after first pass:', s)\nwhile v is None:\n v = check(data)\nprint('first duplicate:', v)\n",
"step-4": "data = None\nwith open('./01-data.txt') as f:\n data = f.read().splitlines()\nss = {}\ns = 0\nss[s] = True\n\n\ndef check(data):\n global ss\n global s\n for line in data:\n s += int(line)\n if ss.get(s, False):\n return s\n ss[s] = True\n return None\n\n\nv = check(data)\nprint('after first pass:', s)\nwhile v is None:\n v = check(data)\nprint('first duplicate:', v)\n",
"step-5": "#!/usr/bin/env python3\n\ndata = None\n\nwith open('./01-data.txt') as f:\n data = f.read().splitlines()\n\nss = {}\ns = 0\nss[s] = True\n\ndef check(data):\n global ss\n global s\n for line in data:\n s += int(line)\n\n if ss.get(s, False):\n return s\n\n ss[s] = True\n return None\n\n\nv = check(data)\nprint('after first pass:', s)\nwhile v is None:\n v = check(data)\nprint('first duplicate:', v)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from turtle import *
while True:
n=input("Right or left? ")
if n == 'right':
right(60)
forward(100)
elif n == 'left':
left(60)
forward(100)
|
normal
|
{
"blob_id": "6f698196e9391d73bd99cda0a098a5bf7a3832ff",
"index": 963,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n n = input('Right or left? ')\n if n == 'right':\n right(60)\n forward(100)\n elif n == 'left':\n left(60)\n forward(100)\n",
"step-3": "from turtle import *\nwhile True:\n n = input('Right or left? ')\n if n == 'right':\n right(60)\n forward(100)\n elif n == 'left':\n left(60)\n forward(100)\n",
"step-4": "from turtle import *\nwhile True:\n n=input(\"Right or left? \")\n\n if n == 'right':\n right(60)\n forward(100)\n elif n == 'left':\n left(60)\n forward(100)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class MyConnection(Connection):
<|reserved_special_token_0|>
def get_none1(self):
"""No return type is specified."""
pass
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_bytes1(self):
""":obj:`bytes`: A bytes value."""
pass
def get_bytes2(self):
"""Returns a bytes value.
Returns
-------
:obj:`bytes`
A bytes value.
"""
pass
def get_int1(self):
""":obj:`int`: An integer value."""
pass
def get_int2(self):
"""Returns an integer value.
Returns
-------
:obj:`int`
An integer value.
"""
pass
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_list_of_bool1(self):
""":obj:`list` of :obj:`bool`: A list of boolean values."""
pass
<|reserved_special_token_0|>
def get_list_of_str1(self):
""":obj:`list` of :obj:`str`: A list of string values."""
pass
def get_list_of_str2(self):
"""A list of string values.
Returns
-------
:obj:`list` of :obj:`str`
A list of string values.
"""
pass
<|reserved_special_token_0|>
def get_list_of_bytes2(self):
"""A list of bytes values.
Returns
-------
:obj:`list` of :obj:`bytes`
A list of bytes values.
"""
pass
def get_list_of_int1(self):
""":obj:`list` of :obj:`int`: A list of integer values."""
pass
def get_list_of_int2(self):
"""A list of integer values.
Returns
-------
:obj:`list` of :obj:`int`
A list of integer values.
"""
pass
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_dict_of_bool1(self):
""":obj:`dict` of :obj:`bool`: A dictionary of boolean values."""
pass
<|reserved_special_token_0|>
def get_dict_of_str1(self):
""":obj:`dict` of :obj:`str`: A dictionary of string values."""
pass
def get_dict_of_str2(self):
"""A dictionary of string values.
Returns
-------
:obj:`dict` of :obj:`str`
A dictionary of string values.
"""
pass
def get_dict_of_bytes1(self):
""":obj:`dict` of :obj:`bytes`: A dictionary of bytes values."""
pass
def get_dict_of_bytes2(self):
"""A dictionary of bytes values.
Returns
-------
:obj:`dict` of :obj:`bytes`
A dictionary of bytes values.
"""
pass
<|reserved_special_token_0|>
def get_dict_of_int2(self):
"""A dictionary of integer values.
Returns
-------
:obj:`dict` of :obj:`int`
A dictionary of integer values.
"""
pass
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_multiple1(self):
"""Many different data types.
Returns
-------
:obj:`str`
A string value.
:obj:`float`
A floating-point value.
:obj:`float`
A floating-point value.
:obj:`dict` of :obj:`int`
A dictionary of integer values.
:obj:`bytes`
A bytes value.
"""
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MyConnection(Connection):
<|reserved_special_token_0|>
def get_none1(self):
"""No return type is specified."""
pass
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_bytes1(self):
""":obj:`bytes`: A bytes value."""
pass
def get_bytes2(self):
"""Returns a bytes value.
Returns
-------
:obj:`bytes`
A bytes value.
"""
pass
def get_int1(self):
""":obj:`int`: An integer value."""
pass
def get_int2(self):
"""Returns an integer value.
Returns
-------
:obj:`int`
An integer value.
"""
pass
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_list_of_bool1(self):
""":obj:`list` of :obj:`bool`: A list of boolean values."""
pass
<|reserved_special_token_0|>
def get_list_of_str1(self):
""":obj:`list` of :obj:`str`: A list of string values."""
pass
def get_list_of_str2(self):
"""A list of string values.
Returns
-------
:obj:`list` of :obj:`str`
A list of string values.
"""
pass
<|reserved_special_token_0|>
def get_list_of_bytes2(self):
"""A list of bytes values.
Returns
-------
:obj:`list` of :obj:`bytes`
A list of bytes values.
"""
pass
def get_list_of_int1(self):
""":obj:`list` of :obj:`int`: A list of integer values."""
pass
def get_list_of_int2(self):
"""A list of integer values.
Returns
-------
:obj:`list` of :obj:`int`
A list of integer values.
"""
pass
def get_list_of_float1(self):
""":obj:`list` of :obj:`float`: A list of floating-point values."""
pass
<|reserved_special_token_0|>
def get_dict_of_bool1(self):
""":obj:`dict` of :obj:`bool`: A dictionary of boolean values."""
pass
<|reserved_special_token_0|>
def get_dict_of_str1(self):
""":obj:`dict` of :obj:`str`: A dictionary of string values."""
pass
def get_dict_of_str2(self):
"""A dictionary of string values.
Returns
-------
:obj:`dict` of :obj:`str`
A dictionary of string values.
"""
pass
def get_dict_of_bytes1(self):
""":obj:`dict` of :obj:`bytes`: A dictionary of bytes values."""
pass
def get_dict_of_bytes2(self):
"""A dictionary of bytes values.
Returns
-------
:obj:`dict` of :obj:`bytes`
A dictionary of bytes values.
"""
pass
<|reserved_special_token_0|>
def get_dict_of_int2(self):
"""A dictionary of integer values.
Returns
-------
:obj:`dict` of :obj:`int`
A dictionary of integer values.
"""
pass
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_multiple1(self):
"""Many different data types.
Returns
-------
:obj:`str`
A string value.
:obj:`float`
A floating-point value.
:obj:`float`
A floating-point value.
:obj:`dict` of :obj:`int`
A dictionary of integer values.
:obj:`bytes`
A bytes value.
"""
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MyConnection(Connection):
<|reserved_special_token_0|>
def get_none1(self):
"""No return type is specified."""
pass
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_string1(self):
""":obj:`str`: A string value."""
pass
<|reserved_special_token_0|>
def get_bytes1(self):
""":obj:`bytes`: A bytes value."""
pass
def get_bytes2(self):
"""Returns a bytes value.
Returns
-------
:obj:`bytes`
A bytes value.
"""
pass
def get_int1(self):
""":obj:`int`: An integer value."""
pass
def get_int2(self):
"""Returns an integer value.
Returns
-------
:obj:`int`
An integer value.
"""
pass
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_list_of_bool1(self):
""":obj:`list` of :obj:`bool`: A list of boolean values."""
pass
<|reserved_special_token_0|>
def get_list_of_str1(self):
""":obj:`list` of :obj:`str`: A list of string values."""
pass
def get_list_of_str2(self):
"""A list of string values.
Returns
-------
:obj:`list` of :obj:`str`
A list of string values.
"""
pass
<|reserved_special_token_0|>
def get_list_of_bytes2(self):
"""A list of bytes values.
Returns
-------
:obj:`list` of :obj:`bytes`
A list of bytes values.
"""
pass
def get_list_of_int1(self):
""":obj:`list` of :obj:`int`: A list of integer values."""
pass
def get_list_of_int2(self):
"""A list of integer values.
Returns
-------
:obj:`list` of :obj:`int`
A list of integer values.
"""
pass
def get_list_of_float1(self):
""":obj:`list` of :obj:`float`: A list of floating-point values."""
pass
<|reserved_special_token_0|>
def get_dict_of_bool1(self):
""":obj:`dict` of :obj:`bool`: A dictionary of boolean values."""
pass
<|reserved_special_token_0|>
def get_dict_of_str1(self):
""":obj:`dict` of :obj:`str`: A dictionary of string values."""
pass
def get_dict_of_str2(self):
"""A dictionary of string values.
Returns
-------
:obj:`dict` of :obj:`str`
A dictionary of string values.
"""
pass
def get_dict_of_bytes1(self):
""":obj:`dict` of :obj:`bytes`: A dictionary of bytes values."""
pass
def get_dict_of_bytes2(self):
"""A dictionary of bytes values.
Returns
-------
:obj:`dict` of :obj:`bytes`
A dictionary of bytes values.
"""
pass
<|reserved_special_token_0|>
def get_dict_of_int2(self):
"""A dictionary of integer values.
Returns
-------
:obj:`dict` of :obj:`int`
A dictionary of integer values.
"""
pass
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_multiple1(self):
"""Many different data types.
Returns
-------
:obj:`str`
A string value.
:obj:`float`
A floating-point value.
:obj:`float`
A floating-point value.
:obj:`dict` of :obj:`int`
A dictionary of integer values.
:obj:`bytes`
A bytes value.
"""
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MyConnection(Connection):
def __init__(self, record):
super(MyConnection, self).__init__(record)
def get_none1(self):
"""No return type is specified."""
pass
def get_none2(self, channel):
"""This function takes 1 input but returns nothing.
Parameters
----------
channel : :obj:`str`
Some channel number
"""
pass
def get_bool1(self):
""":obj:`bool`: A boolean value."""
pass
def get_bool2(self):
"""Returns a boolean value.
Returns
-------
:obj:`bool`
A boolean value.
"""
pass
def get_string1(self):
""":obj:`str`: A string value."""
pass
def get_string2(self):
"""Returns a string value.
Returns
-------
:obj:`str`
A string value.
"""
pass
def get_bytes1(self):
""":obj:`bytes`: A bytes value."""
pass
def get_bytes2(self):
"""Returns a bytes value.
Returns
-------
:obj:`bytes`
A bytes value.
"""
pass
def get_int1(self):
""":obj:`int`: An integer value."""
pass
def get_int2(self):
"""Returns an integer value.
Returns
-------
:obj:`int`
An integer value.
"""
pass
def get_float1(self):
""":obj:`float`: A floating-point value."""
pass
def get_float2(self):
"""Returns a floating-point value.
Returns
-------
:obj:`float`
A floating-point value.
"""
pass
def get_list_of_bool1(self):
""":obj:`list` of :obj:`bool`: A list of boolean values."""
pass
def get_list_of_bool2(self):
"""A list of boolean values.
Returns
-------
:obj:`list` of :obj:`bool`
A list of boolean values.
"""
pass
def get_list_of_str1(self):
""":obj:`list` of :obj:`str`: A list of string values."""
pass
def get_list_of_str2(self):
"""A list of string values.
Returns
-------
:obj:`list` of :obj:`str`
A list of string values.
"""
pass
def get_list_of_bytes1(self):
""":obj:`list` of :obj:`bytes`: A list of bytes values."""
pass
def get_list_of_bytes2(self):
"""A list of bytes values.
Returns
-------
:obj:`list` of :obj:`bytes`
A list of bytes values.
"""
pass
def get_list_of_int1(self):
""":obj:`list` of :obj:`int`: A list of integer values."""
pass
def get_list_of_int2(self):
"""A list of integer values.
Returns
-------
:obj:`list` of :obj:`int`
A list of integer values.
"""
pass
def get_list_of_float1(self):
""":obj:`list` of :obj:`float`: A list of floating-point values."""
pass
def get_list_of_float2(self):
"""A list of floating-point values.
Returns
-------
:obj:`list` of :obj:`float`
A list of floating-point values.
"""
pass
def get_dict_of_bool1(self):
""":obj:`dict` of :obj:`bool`: A dictionary of boolean values."""
pass
def get_dict_of_bool2(self):
"""A dictionary of boolean values.
Returns
-------
:obj:`dict` of :obj:`bool`
A dictionary of boolean values.
"""
pass
def get_dict_of_str1(self):
""":obj:`dict` of :obj:`str`: A dictionary of string values."""
pass
def get_dict_of_str2(self):
"""A dictionary of string values.
Returns
-------
:obj:`dict` of :obj:`str`
A dictionary of string values.
"""
pass
def get_dict_of_bytes1(self):
""":obj:`dict` of :obj:`bytes`: A dictionary of bytes values."""
pass
def get_dict_of_bytes2(self):
"""A dictionary of bytes values.
Returns
-------
:obj:`dict` of :obj:`bytes`
A dictionary of bytes values.
"""
pass
def get_dict_of_int1(self):
""":obj:`dict` of :obj:`int`: A dictionary of integer values."""
pass
def get_dict_of_int2(self):
"""A dictionary of integer values.
Returns
-------
:obj:`dict` of :obj:`int`
A dictionary of integer values.
"""
pass
def get_dict_of_float1(self):
""":obj:`dict` of :obj:`float`: A dictionary of floating-point values."""
pass
def get_dict_of_float2(self):
"""A dictionary of floating-point values.
Returns
-------
:obj:`dict` of :obj:`float`
A dictionary of floating-point values.
"""
pass
def get_multiple1(self):
"""Many different data types.
Returns
-------
:obj:`str`
A string value.
:obj:`float`
A floating-point value.
:obj:`float`
A floating-point value.
:obj:`dict` of :obj:`int`
A dictionary of integer values.
:obj:`bytes`
A bytes value.
"""
pass
def test_return_type_builtin():
demo = ConnectionDemo(EquipmentRecord(), MyConnection)
assert demo.get_none1() is None
assert demo.get_none2() is None
assert isinstance(demo.get_bool1(), bool)
assert isinstance(demo.get_bool2(), bool)
assert isinstance(demo.get_string1(), str)
assert isinstance(demo.get_string2(), str)
assert isinstance(demo.get_bytes1(), bytes)
assert isinstance(demo.get_bytes2(), bytes)
assert isinstance(demo.get_int1(), int)
assert isinstance(demo.get_int2(), int)
assert isinstance(demo.get_float1(), float)
assert isinstance(demo.get_float2(), float)
x = demo.get_list_of_bool1()
assert isinstance(x, list) and isinstance(x[0], bool)
x = demo.get_list_of_bool2()
assert isinstance(x, list) and isinstance(x[0], bool)
x = demo.get_list_of_str1()
assert isinstance(x, list) and isinstance(x[0], str)
x = demo.get_list_of_str2()
assert isinstance(x, list) and isinstance(x[0], str)
x = demo.get_list_of_bytes1()
assert isinstance(x, list) and isinstance(x[0], bytes)
x = demo.get_list_of_bytes2()
assert isinstance(x, list) and isinstance(x[0], bytes)
x = demo.get_list_of_int1()
assert isinstance(x, list) and isinstance(x[0], int)
x = demo.get_list_of_int2()
assert isinstance(x, list) and isinstance(x[0], int)
x = demo.get_list_of_float1()
assert isinstance(x, list) and isinstance(x[0], float)
x = demo.get_list_of_float2()
assert isinstance(x, list) and isinstance(x[0], float)
x = demo.get_dict_of_bool1()
assert isinstance(x, dict) and isinstance(x['demo'], bool)
x = demo.get_dict_of_bool2()
assert isinstance(x, dict) and isinstance(x['demo'], bool)
x = demo.get_dict_of_str1()
assert isinstance(x, dict) and isinstance(x['demo'], str)
x = demo.get_dict_of_str2()
assert isinstance(x, dict) and isinstance(x['demo'], str)
x = demo.get_dict_of_bytes1()
assert isinstance(x, dict) and isinstance(x['demo'], bytes)
x = demo.get_dict_of_bytes2()
assert isinstance(x, dict) and isinstance(x['demo'], bytes)
x = demo.get_dict_of_int1()
assert isinstance(x, dict) and isinstance(x['demo'], int)
x = demo.get_dict_of_int2()
assert isinstance(x, dict) and isinstance(x['demo'], int)
x = demo.get_dict_of_float1()
assert isinstance(x, dict) and isinstance(x['demo'], float)
x = demo.get_dict_of_float2()
assert isinstance(x, dict) and isinstance(x['demo'], float)
x = demo.get_multiple1()
assert len(x) == 5
assert isinstance(x[0], str)
assert isinstance(x[1], float)
assert isinstance(x[2], float)
assert isinstance(x[3], dict) and isinstance(x[3]['demo'], int)
assert isinstance(x[4], bytes)
def test_return_type_object():
scope = ConnectionDemo(EquipmentRecord(), PicoScope)
x = scope.channel()
assert isinstance(x, dict) and x['demo'] == PicoScopeChannel
<|reserved_special_token_1|>
from msl.equipment.connection import Connection
from msl.equipment.connection_demo import ConnectionDemo
from msl.equipment.record_types import EquipmentRecord
from msl.equipment.resources.picotech.picoscope.picoscope import PicoScope
from msl.equipment.resources.picotech.picoscope.channel import PicoScopeChannel
class MyConnection(Connection):
def __init__(self, record):
super(MyConnection, self).__init__(record)
def get_none1(self):
"""No return type is specified."""
pass
def get_none2(self, channel):
"""This function takes 1 input but returns nothing.
Parameters
----------
channel : :obj:`str`
Some channel number
"""
pass
def get_bool1(self):
""":obj:`bool`: A boolean value."""
pass
def get_bool2(self):
"""Returns a boolean value.
Returns
-------
:obj:`bool`
A boolean value.
"""
pass
def get_string1(self):
""":obj:`str`: A string value."""
pass
def get_string2(self):
"""Returns a string value.
Returns
-------
:obj:`str`
A string value.
"""
pass
def get_bytes1(self):
""":obj:`bytes`: A bytes value."""
pass
def get_bytes2(self):
"""Returns a bytes value.
Returns
-------
:obj:`bytes`
A bytes value.
"""
pass
def get_int1(self):
""":obj:`int`: An integer value."""
pass
def get_int2(self):
"""Returns an integer value.
Returns
-------
:obj:`int`
An integer value.
"""
pass
def get_float1(self):
""":obj:`float`: A floating-point value."""
pass
def get_float2(self):
"""Returns a floating-point value.
Returns
-------
:obj:`float`
A floating-point value.
"""
pass
def get_list_of_bool1(self):
""":obj:`list` of :obj:`bool`: A list of boolean values."""
pass
def get_list_of_bool2(self):
"""A list of boolean values.
Returns
-------
:obj:`list` of :obj:`bool`
A list of boolean values.
"""
pass
def get_list_of_str1(self):
""":obj:`list` of :obj:`str`: A list of string values."""
pass
def get_list_of_str2(self):
"""A list of string values.
Returns
-------
:obj:`list` of :obj:`str`
A list of string values.
"""
pass
def get_list_of_bytes1(self):
""":obj:`list` of :obj:`bytes`: A list of bytes values."""
pass
def get_list_of_bytes2(self):
"""A list of bytes values.
Returns
-------
:obj:`list` of :obj:`bytes`
A list of bytes values.
"""
pass
def get_list_of_int1(self):
""":obj:`list` of :obj:`int`: A list of integer values."""
pass
def get_list_of_int2(self):
"""A list of integer values.
Returns
-------
:obj:`list` of :obj:`int`
A list of integer values.
"""
pass
def get_list_of_float1(self):
""":obj:`list` of :obj:`float`: A list of floating-point values."""
pass
def get_list_of_float2(self):
"""A list of floating-point values.
Returns
-------
:obj:`list` of :obj:`float`
A list of floating-point values.
"""
pass
def get_dict_of_bool1(self):
""":obj:`dict` of :obj:`bool`: A dictionary of boolean values."""
pass
def get_dict_of_bool2(self):
"""A dictionary of boolean values.
Returns
-------
:obj:`dict` of :obj:`bool`
A dictionary of boolean values.
"""
pass
def get_dict_of_str1(self):
""":obj:`dict` of :obj:`str`: A dictionary of string values."""
pass
def get_dict_of_str2(self):
"""A dictionary of string values.
Returns
-------
:obj:`dict` of :obj:`str`
A dictionary of string values.
"""
pass
def get_dict_of_bytes1(self):
""":obj:`dict` of :obj:`bytes`: A dictionary of bytes values."""
pass
def get_dict_of_bytes2(self):
"""A dictionary of bytes values.
Returns
-------
:obj:`dict` of :obj:`bytes`
A dictionary of bytes values.
"""
pass
def get_dict_of_int1(self):
""":obj:`dict` of :obj:`int`: A dictionary of integer values."""
pass
def get_dict_of_int2(self):
"""A dictionary of integer values.
Returns
-------
:obj:`dict` of :obj:`int`
A dictionary of integer values.
"""
pass
def get_dict_of_float1(self):
""":obj:`dict` of :obj:`float`: A dictionary of floating-point values."""
pass
def get_dict_of_float2(self):
"""A dictionary of floating-point values.
Returns
-------
:obj:`dict` of :obj:`float`
A dictionary of floating-point values.
"""
pass
def get_multiple1(self):
"""Many different data types.
Returns
-------
:obj:`str`
A string value.
:obj:`float`
A floating-point value.
:obj:`float`
A floating-point value.
:obj:`dict` of :obj:`int`
A dictionary of integer values.
:obj:`bytes`
A bytes value.
"""
pass
def test_return_type_builtin():
demo = ConnectionDemo(EquipmentRecord(), MyConnection)
assert demo.get_none1() is None
assert demo.get_none2() is None
assert isinstance(demo.get_bool1(), bool)
assert isinstance(demo.get_bool2(), bool)
assert isinstance(demo.get_string1(), str)
assert isinstance(demo.get_string2(), str)
assert isinstance(demo.get_bytes1(), bytes)
assert isinstance(demo.get_bytes2(), bytes)
assert isinstance(demo.get_int1(), int)
assert isinstance(demo.get_int2(), int)
assert isinstance(demo.get_float1(), float)
assert isinstance(demo.get_float2(), float)
x = demo.get_list_of_bool1()
assert isinstance(x, list) and isinstance(x[0], bool)
x = demo.get_list_of_bool2()
assert isinstance(x, list) and isinstance(x[0], bool)
x = demo.get_list_of_str1()
assert isinstance(x, list) and isinstance(x[0], str)
x = demo.get_list_of_str2()
assert isinstance(x, list) and isinstance(x[0], str)
x = demo.get_list_of_bytes1()
assert isinstance(x, list) and isinstance(x[0], bytes)
x = demo.get_list_of_bytes2()
assert isinstance(x, list) and isinstance(x[0], bytes)
x = demo.get_list_of_int1()
assert isinstance(x, list) and isinstance(x[0], int)
x = demo.get_list_of_int2()
assert isinstance(x, list) and isinstance(x[0], int)
x = demo.get_list_of_float1()
assert isinstance(x, list) and isinstance(x[0], float)
x = demo.get_list_of_float2()
assert isinstance(x, list) and isinstance(x[0], float)
x = demo.get_dict_of_bool1()
assert isinstance(x, dict) and isinstance(x['demo'], bool)
x = demo.get_dict_of_bool2()
assert isinstance(x, dict) and isinstance(x['demo'], bool)
x = demo.get_dict_of_str1()
assert isinstance(x, dict) and isinstance(x['demo'], str)
x = demo.get_dict_of_str2()
assert isinstance(x, dict) and isinstance(x['demo'], str)
x = demo.get_dict_of_bytes1()
assert isinstance(x, dict) and isinstance(x['demo'], bytes)
x = demo.get_dict_of_bytes2()
assert isinstance(x, dict) and isinstance(x['demo'], bytes)
x = demo.get_dict_of_int1()
assert isinstance(x, dict) and isinstance(x['demo'], int)
x = demo.get_dict_of_int2()
assert isinstance(x, dict) and isinstance(x['demo'], int)
x = demo.get_dict_of_float1()
assert isinstance(x, dict) and isinstance(x['demo'], float)
x = demo.get_dict_of_float2()
assert isinstance(x, dict) and isinstance(x['demo'], float)
x = demo.get_multiple1()
assert len(x) == 5
assert isinstance(x[0], str)
assert isinstance(x[1], float)
assert isinstance(x[2], float)
assert isinstance(x[3], dict) and isinstance(x[3]['demo'], int)
assert isinstance(x[4], bytes)
def test_return_type_object():
scope = ConnectionDemo(EquipmentRecord(), PicoScope)
x = scope.channel()
assert isinstance(x, dict) and x['demo'] == PicoScopeChannel
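
# Note on what the tests above exercise: in demo mode, ConnectionDemo does not open a
# real connection. It reads each method's docstring -- the ":obj:`type`:" one-liner or
# the numpydoc "Returns" section -- and fabricates a value of the documented type; as
# the asserts show, documented lists come back with one element and documented dicts
# come back under a single 'demo' key.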
|
flexible
|
{
"blob_id": "82c3419679a93c7640eae48b543aca75f5ff086d",
"index": 4880,
"step-1": "<mask token>\n\n\nclass MyConnection(Connection):\n <mask token>\n\n def get_none1(self):\n \"\"\"No return type is specified.\"\"\"\n pass\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def get_bytes1(self):\n \"\"\":obj:`bytes`: A bytes value.\"\"\"\n pass\n\n def get_bytes2(self):\n \"\"\"Returns a bytes value.\n\n Returns\n -------\n :obj:`bytes`\n A bytes value.\n \"\"\"\n pass\n\n def get_int1(self):\n \"\"\":obj:`int`: An integer value.\"\"\"\n pass\n\n def get_int2(self):\n \"\"\"Returns an integer value.\n\n Returns\n -------\n :obj:`int`\n An integer value.\n \"\"\"\n pass\n <mask token>\n <mask token>\n\n def get_list_of_bool1(self):\n \"\"\":obj:`list` of :obj:`bool`: A list of boolean values.\"\"\"\n pass\n <mask token>\n\n def get_list_of_str1(self):\n \"\"\":obj:`list` of :obj:`str`: A list of string values.\"\"\"\n pass\n\n def get_list_of_str2(self):\n \"\"\"A list of string values.\n\n Returns\n -------\n :obj:`list` of :obj:`str`\n A list of string values.\n \"\"\"\n pass\n <mask token>\n\n def get_list_of_bytes2(self):\n \"\"\"A list of bytes values.\n\n Returns\n -------\n :obj:`list` of :obj:`bytes`\n A list of bytes values.\n \"\"\"\n pass\n\n def get_list_of_int1(self):\n \"\"\":obj:`list` of :obj:`int`: A list of integer values.\"\"\"\n pass\n\n def get_list_of_int2(self):\n \"\"\"A list of integer values.\n\n Returns\n -------\n :obj:`list` of :obj:`int`\n A list of integer values.\n \"\"\"\n pass\n <mask token>\n <mask token>\n\n def get_dict_of_bool1(self):\n \"\"\":obj:`dict` of :obj:`bool`: A dictionary of boolean values.\"\"\"\n pass\n <mask token>\n\n def get_dict_of_str1(self):\n \"\"\":obj:`dict` of :obj:`str`: A dictionary of string values.\"\"\"\n pass\n\n def get_dict_of_str2(self):\n \"\"\"A dictionary of string values.\n\n Returns\n -------\n :obj:`dict` of :obj:`str`\n A dictionary of string values.\n \"\"\"\n pass\n\n def get_dict_of_bytes1(self):\n \"\"\":obj:`dict` of :obj:`bytes`: A dictionary of bytes values.\"\"\"\n pass\n\n def get_dict_of_bytes2(self):\n \"\"\"A dictionary of bytes values.\n\n Returns\n -------\n :obj:`dict` of :obj:`bytes`\n A dictionary of bytes values.\n \"\"\"\n pass\n <mask token>\n\n def get_dict_of_int2(self):\n \"\"\"A dictionary of integer values.\n\n Returns\n -------\n :obj:`dict` of :obj:`int`\n A dictionary of integer values.\n \"\"\"\n pass\n <mask token>\n <mask token>\n\n def get_multiple1(self):\n \"\"\"Many different data types.\n\n Returns\n -------\n :obj:`str`\n A string value.\n :obj:`float`\n A floating-point value.\n :obj:`float`\n A floating-point value.\n :obj:`dict` of :obj:`int`\n A dictionary of integer values.\n :obj:`bytes`\n A bytes value.\n \"\"\"\n pass\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MyConnection(Connection):\n <mask token>\n\n def get_none1(self):\n \"\"\"No return type is specified.\"\"\"\n pass\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def get_bytes1(self):\n \"\"\":obj:`bytes`: A bytes value.\"\"\"\n pass\n\n def get_bytes2(self):\n \"\"\"Returns a bytes value.\n\n Returns\n -------\n :obj:`bytes`\n A bytes value.\n \"\"\"\n pass\n\n def get_int1(self):\n \"\"\":obj:`int`: An integer value.\"\"\"\n pass\n\n def get_int2(self):\n \"\"\"Returns an integer value.\n\n Returns\n -------\n :obj:`int`\n An integer value.\n \"\"\"\n pass\n <mask token>\n <mask token>\n\n def get_list_of_bool1(self):\n \"\"\":obj:`list` of :obj:`bool`: A list of boolean values.\"\"\"\n pass\n <mask token>\n\n def get_list_of_str1(self):\n \"\"\":obj:`list` of :obj:`str`: A list of string values.\"\"\"\n pass\n\n def get_list_of_str2(self):\n \"\"\"A list of string values.\n\n Returns\n -------\n :obj:`list` of :obj:`str`\n A list of string values.\n \"\"\"\n pass\n <mask token>\n\n def get_list_of_bytes2(self):\n \"\"\"A list of bytes values.\n\n Returns\n -------\n :obj:`list` of :obj:`bytes`\n A list of bytes values.\n \"\"\"\n pass\n\n def get_list_of_int1(self):\n \"\"\":obj:`list` of :obj:`int`: A list of integer values.\"\"\"\n pass\n\n def get_list_of_int2(self):\n \"\"\"A list of integer values.\n\n Returns\n -------\n :obj:`list` of :obj:`int`\n A list of integer values.\n \"\"\"\n pass\n\n def get_list_of_float1(self):\n \"\"\":obj:`list` of :obj:`float`: A list of floating-point values.\"\"\"\n pass\n <mask token>\n\n def get_dict_of_bool1(self):\n \"\"\":obj:`dict` of :obj:`bool`: A dictionary of boolean values.\"\"\"\n pass\n <mask token>\n\n def get_dict_of_str1(self):\n \"\"\":obj:`dict` of :obj:`str`: A dictionary of string values.\"\"\"\n pass\n\n def get_dict_of_str2(self):\n \"\"\"A dictionary of string values.\n\n Returns\n -------\n :obj:`dict` of :obj:`str`\n A dictionary of string values.\n \"\"\"\n pass\n\n def get_dict_of_bytes1(self):\n \"\"\":obj:`dict` of :obj:`bytes`: A dictionary of bytes values.\"\"\"\n pass\n\n def get_dict_of_bytes2(self):\n \"\"\"A dictionary of bytes values.\n\n Returns\n -------\n :obj:`dict` of :obj:`bytes`\n A dictionary of bytes values.\n \"\"\"\n pass\n <mask token>\n\n def get_dict_of_int2(self):\n \"\"\"A dictionary of integer values.\n\n Returns\n -------\n :obj:`dict` of :obj:`int`\n A dictionary of integer values.\n \"\"\"\n pass\n <mask token>\n <mask token>\n\n def get_multiple1(self):\n \"\"\"Many different data types.\n\n Returns\n -------\n :obj:`str`\n A string value.\n :obj:`float`\n A floating-point value.\n :obj:`float`\n A floating-point value.\n :obj:`dict` of :obj:`int`\n A dictionary of integer values.\n :obj:`bytes`\n A bytes value.\n \"\"\"\n pass\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass MyConnection(Connection):\n <mask token>\n\n def get_none1(self):\n \"\"\"No return type is specified.\"\"\"\n pass\n <mask token>\n <mask token>\n <mask token>\n\n def get_string1(self):\n \"\"\":obj:`str`: A string value.\"\"\"\n pass\n <mask token>\n\n def get_bytes1(self):\n \"\"\":obj:`bytes`: A bytes value.\"\"\"\n pass\n\n def get_bytes2(self):\n \"\"\"Returns a bytes value.\n\n Returns\n -------\n :obj:`bytes`\n A bytes value.\n \"\"\"\n pass\n\n def get_int1(self):\n \"\"\":obj:`int`: An integer value.\"\"\"\n pass\n\n def get_int2(self):\n \"\"\"Returns an integer value.\n\n Returns\n -------\n :obj:`int`\n An integer value.\n \"\"\"\n pass\n <mask token>\n <mask token>\n\n def get_list_of_bool1(self):\n \"\"\":obj:`list` of :obj:`bool`: A list of boolean values.\"\"\"\n pass\n <mask token>\n\n def get_list_of_str1(self):\n \"\"\":obj:`list` of :obj:`str`: A list of string values.\"\"\"\n pass\n\n def get_list_of_str2(self):\n \"\"\"A list of string values.\n\n Returns\n -------\n :obj:`list` of :obj:`str`\n A list of string values.\n \"\"\"\n pass\n <mask token>\n\n def get_list_of_bytes2(self):\n \"\"\"A list of bytes values.\n\n Returns\n -------\n :obj:`list` of :obj:`bytes`\n A list of bytes values.\n \"\"\"\n pass\n\n def get_list_of_int1(self):\n \"\"\":obj:`list` of :obj:`int`: A list of integer values.\"\"\"\n pass\n\n def get_list_of_int2(self):\n \"\"\"A list of integer values.\n\n Returns\n -------\n :obj:`list` of :obj:`int`\n A list of integer values.\n \"\"\"\n pass\n\n def get_list_of_float1(self):\n \"\"\":obj:`list` of :obj:`float`: A list of floating-point values.\"\"\"\n pass\n <mask token>\n\n def get_dict_of_bool1(self):\n \"\"\":obj:`dict` of :obj:`bool`: A dictionary of boolean values.\"\"\"\n pass\n <mask token>\n\n def get_dict_of_str1(self):\n \"\"\":obj:`dict` of :obj:`str`: A dictionary of string values.\"\"\"\n pass\n\n def get_dict_of_str2(self):\n \"\"\"A dictionary of string values.\n\n Returns\n -------\n :obj:`dict` of :obj:`str`\n A dictionary of string values.\n \"\"\"\n pass\n\n def get_dict_of_bytes1(self):\n \"\"\":obj:`dict` of :obj:`bytes`: A dictionary of bytes values.\"\"\"\n pass\n\n def get_dict_of_bytes2(self):\n \"\"\"A dictionary of bytes values.\n\n Returns\n -------\n :obj:`dict` of :obj:`bytes`\n A dictionary of bytes values.\n \"\"\"\n pass\n <mask token>\n\n def get_dict_of_int2(self):\n \"\"\"A dictionary of integer values.\n\n Returns\n -------\n :obj:`dict` of :obj:`int`\n A dictionary of integer values.\n \"\"\"\n pass\n <mask token>\n <mask token>\n\n def get_multiple1(self):\n \"\"\"Many different data types.\n\n Returns\n -------\n :obj:`str`\n A string value.\n :obj:`float`\n A floating-point value.\n :obj:`float`\n A floating-point value.\n :obj:`dict` of :obj:`int`\n A dictionary of integer values.\n :obj:`bytes`\n A bytes value.\n \"\"\"\n pass\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass MyConnection(Connection):\n\n def __init__(self, record):\n super(MyConnection, self).__init__(record)\n\n def get_none1(self):\n \"\"\"No return type is specified.\"\"\"\n pass\n\n def get_none2(self, channel):\n \"\"\"This function takes 1 input but returns nothing.\n\n Parameters\n ----------\n channel : :obj:`str`\n Some channel number\n \"\"\"\n pass\n\n def get_bool1(self):\n \"\"\":obj:`bool`: A boolean value.\"\"\"\n pass\n\n def get_bool2(self):\n \"\"\"Returns a boolean value.\n\n Returns\n -------\n :obj:`bool`\n A boolean value.\n \"\"\"\n pass\n\n def get_string1(self):\n \"\"\":obj:`str`: A string value.\"\"\"\n pass\n\n def get_string2(self):\n \"\"\"Returns a string value.\n\n Returns\n -------\n :obj:`str`\n A string value.\n \"\"\"\n pass\n\n def get_bytes1(self):\n \"\"\":obj:`bytes`: A bytes value.\"\"\"\n pass\n\n def get_bytes2(self):\n \"\"\"Returns a bytes value.\n\n Returns\n -------\n :obj:`bytes`\n A bytes value.\n \"\"\"\n pass\n\n def get_int1(self):\n \"\"\":obj:`int`: An integer value.\"\"\"\n pass\n\n def get_int2(self):\n \"\"\"Returns an integer value.\n\n Returns\n -------\n :obj:`int`\n An integer value.\n \"\"\"\n pass\n\n def get_float1(self):\n \"\"\":obj:`float`: A floating-point value.\"\"\"\n pass\n\n def get_float2(self):\n \"\"\"Returns a floating-point value.\n\n Returns\n -------\n :obj:`float`\n A floating-point value.\n \"\"\"\n pass\n\n def get_list_of_bool1(self):\n \"\"\":obj:`list` of :obj:`bool`: A list of boolean values.\"\"\"\n pass\n\n def get_list_of_bool2(self):\n \"\"\"A list of boolean values.\n\n Returns\n -------\n :obj:`list` of :obj:`bool`\n A list of boolean values.\n \"\"\"\n pass\n\n def get_list_of_str1(self):\n \"\"\":obj:`list` of :obj:`str`: A list of string values.\"\"\"\n pass\n\n def get_list_of_str2(self):\n \"\"\"A list of string values.\n\n Returns\n -------\n :obj:`list` of :obj:`str`\n A list of string values.\n \"\"\"\n pass\n\n def get_list_of_bytes1(self):\n \"\"\":obj:`list` of :obj:`bytes`: A list of bytes values.\"\"\"\n pass\n\n def get_list_of_bytes2(self):\n \"\"\"A list of bytes values.\n\n Returns\n -------\n :obj:`list` of :obj:`bytes`\n A list of bytes values.\n \"\"\"\n pass\n\n def get_list_of_int1(self):\n \"\"\":obj:`list` of :obj:`int`: A list of integer values.\"\"\"\n pass\n\n def get_list_of_int2(self):\n \"\"\"A list of integer values.\n\n Returns\n -------\n :obj:`list` of :obj:`int`\n A list of integer values.\n \"\"\"\n pass\n\n def get_list_of_float1(self):\n \"\"\":obj:`list` of :obj:`float`: A list of floating-point values.\"\"\"\n pass\n\n def get_list_of_float2(self):\n \"\"\"A list of floating-point values.\n\n Returns\n -------\n :obj:`list` of :obj:`float`\n A list of floating-point values.\n \"\"\"\n pass\n\n def get_dict_of_bool1(self):\n \"\"\":obj:`dict` of :obj:`bool`: A dictionary of boolean values.\"\"\"\n pass\n\n def get_dict_of_bool2(self):\n \"\"\"A dictionary of boolean values.\n\n Returns\n -------\n :obj:`dict` of :obj:`bool`\n A dictionary of boolean values.\n \"\"\"\n pass\n\n def get_dict_of_str1(self):\n \"\"\":obj:`dict` of :obj:`str`: A dictionary of string values.\"\"\"\n pass\n\n def get_dict_of_str2(self):\n \"\"\"A dictionary of string values.\n\n Returns\n -------\n :obj:`dict` of :obj:`str`\n A dictionary of string values.\n \"\"\"\n pass\n\n def get_dict_of_bytes1(self):\n \"\"\":obj:`dict` of :obj:`bytes`: A dictionary of bytes values.\"\"\"\n pass\n\n def get_dict_of_bytes2(self):\n \"\"\"A dictionary of bytes 
values.\n\n Returns\n -------\n :obj:`dict` of :obj:`bytes`\n A dictionary of bytes values.\n \"\"\"\n pass\n\n def get_dict_of_int1(self):\n \"\"\":obj:`dict` of :obj:`int`: A dictionary of integer values.\"\"\"\n pass\n\n def get_dict_of_int2(self):\n \"\"\"A dictionary of integer values.\n\n Returns\n -------\n :obj:`dict` of :obj:`int`\n A dictionary of integer values.\n \"\"\"\n pass\n\n def get_dict_of_float1(self):\n \"\"\":obj:`dict` of :obj:`float`: A dictionary of floating-point values.\"\"\"\n pass\n\n def get_dict_of_float2(self):\n \"\"\"A dictionary of floating-point values.\n\n Returns\n -------\n :obj:`dict` of :obj:`float`\n A dictionary of floating-point values.\n \"\"\"\n pass\n\n def get_multiple1(self):\n \"\"\"Many different data types.\n\n Returns\n -------\n :obj:`str`\n A string value.\n :obj:`float`\n A floating-point value.\n :obj:`float`\n A floating-point value.\n :obj:`dict` of :obj:`int`\n A dictionary of integer values.\n :obj:`bytes`\n A bytes value.\n \"\"\"\n pass\n\n\ndef test_return_type_builtin():\n demo = ConnectionDemo(EquipmentRecord(), MyConnection)\n assert demo.get_none1() is None\n assert demo.get_none2() is None\n assert isinstance(demo.get_bool1(), bool)\n assert isinstance(demo.get_bool2(), bool)\n assert isinstance(demo.get_string1(), str)\n assert isinstance(demo.get_string2(), str)\n assert isinstance(demo.get_bytes1(), bytes)\n assert isinstance(demo.get_bytes2(), bytes)\n assert isinstance(demo.get_int1(), int)\n assert isinstance(demo.get_int2(), int)\n assert isinstance(demo.get_float1(), float)\n assert isinstance(demo.get_float2(), float)\n x = demo.get_list_of_bool1()\n assert isinstance(x, list) and isinstance(x[0], bool)\n x = demo.get_list_of_bool2()\n assert isinstance(x, list) and isinstance(x[0], bool)\n x = demo.get_list_of_str1()\n assert isinstance(x, list) and isinstance(x[0], str)\n x = demo.get_list_of_str2()\n assert isinstance(x, list) and isinstance(x[0], str)\n x = demo.get_list_of_bytes1()\n assert isinstance(x, list) and isinstance(x[0], bytes)\n x = demo.get_list_of_bytes2()\n assert isinstance(x, list) and isinstance(x[0], bytes)\n x = demo.get_list_of_int1()\n assert isinstance(x, list) and isinstance(x[0], int)\n x = demo.get_list_of_int2()\n assert isinstance(x, list) and isinstance(x[0], int)\n x = demo.get_list_of_float1()\n assert isinstance(x, list) and isinstance(x[0], float)\n x = demo.get_list_of_float2()\n assert isinstance(x, list) and isinstance(x[0], float)\n x = demo.get_dict_of_bool1()\n assert isinstance(x, dict) and isinstance(x['demo'], bool)\n x = demo.get_dict_of_bool2()\n assert isinstance(x, dict) and isinstance(x['demo'], bool)\n x = demo.get_dict_of_str1()\n assert isinstance(x, dict) and isinstance(x['demo'], str)\n x = demo.get_dict_of_str2()\n assert isinstance(x, dict) and isinstance(x['demo'], str)\n x = demo.get_dict_of_bytes1()\n assert isinstance(x, dict) and isinstance(x['demo'], bytes)\n x = demo.get_dict_of_bytes2()\n assert isinstance(x, dict) and isinstance(x['demo'], bytes)\n x = demo.get_dict_of_int1()\n assert isinstance(x, dict) and isinstance(x['demo'], int)\n x = demo.get_dict_of_int2()\n assert isinstance(x, dict) and isinstance(x['demo'], int)\n x = demo.get_dict_of_float1()\n assert isinstance(x, dict) and isinstance(x['demo'], float)\n x = demo.get_dict_of_float2()\n assert isinstance(x, dict) and isinstance(x['demo'], float)\n x = demo.get_multiple1()\n assert len(x) == 5\n assert isinstance(x[0], str)\n assert isinstance(x[1], float)\n assert isinstance(x[2], 
float)\n assert isinstance(x[3], dict) and isinstance(x[3]['demo'], int)\n assert isinstance(x[4], bytes)\n\n\ndef test_return_type_object():\n scope = ConnectionDemo(EquipmentRecord(), PicoScope)\n x = scope.channel()\n assert isinstance(x, dict) and x['demo'] == PicoScopeChannel\n",
"step-5": "from msl.equipment.connection import Connection\nfrom msl.equipment.connection_demo import ConnectionDemo\nfrom msl.equipment.record_types import EquipmentRecord\nfrom msl.equipment.resources.picotech.picoscope.picoscope import PicoScope\nfrom msl.equipment.resources.picotech.picoscope.channel import PicoScopeChannel\n\n\nclass MyConnection(Connection):\n\n def __init__(self, record):\n super(MyConnection, self).__init__(record)\n\n def get_none1(self):\n \"\"\"No return type is specified.\"\"\"\n pass\n\n def get_none2(self, channel):\n \"\"\"This function takes 1 input but returns nothing.\n\n Parameters\n ----------\n channel : :obj:`str`\n Some channel number\n \"\"\"\n pass\n\n def get_bool1(self):\n \"\"\":obj:`bool`: A boolean value.\"\"\"\n pass\n\n def get_bool2(self):\n \"\"\"Returns a boolean value.\n\n Returns\n -------\n :obj:`bool`\n A boolean value.\n \"\"\"\n pass\n\n def get_string1(self):\n \"\"\":obj:`str`: A string value.\"\"\"\n pass\n\n def get_string2(self):\n \"\"\"Returns a string value.\n\n Returns\n -------\n :obj:`str`\n A string value.\n \"\"\"\n pass\n\n def get_bytes1(self):\n \"\"\":obj:`bytes`: A bytes value.\"\"\"\n pass\n\n def get_bytes2(self):\n \"\"\"Returns a bytes value.\n\n Returns\n -------\n :obj:`bytes`\n A bytes value.\n \"\"\"\n pass\n\n def get_int1(self):\n \"\"\":obj:`int`: An integer value.\"\"\"\n pass\n\n def get_int2(self):\n \"\"\"Returns an integer value.\n\n Returns\n -------\n :obj:`int`\n An integer value.\n \"\"\"\n pass\n\n def get_float1(self):\n \"\"\":obj:`float`: A floating-point value.\"\"\"\n pass\n\n def get_float2(self):\n \"\"\"Returns a floating-point value.\n\n Returns\n -------\n :obj:`float`\n A floating-point value.\n \"\"\"\n pass\n\n def get_list_of_bool1(self):\n \"\"\":obj:`list` of :obj:`bool`: A list of boolean values.\"\"\"\n pass\n\n def get_list_of_bool2(self):\n \"\"\"A list of boolean values.\n\n Returns\n -------\n :obj:`list` of :obj:`bool`\n A list of boolean values.\n \"\"\"\n pass\n\n def get_list_of_str1(self):\n \"\"\":obj:`list` of :obj:`str`: A list of string values.\"\"\"\n pass\n\n def get_list_of_str2(self):\n \"\"\"A list of string values.\n\n Returns\n -------\n :obj:`list` of :obj:`str`\n A list of string values.\n \"\"\"\n pass\n\n def get_list_of_bytes1(self):\n \"\"\":obj:`list` of :obj:`bytes`: A list of bytes values.\"\"\"\n pass\n\n def get_list_of_bytes2(self):\n \"\"\"A list of bytes values.\n\n Returns\n -------\n :obj:`list` of :obj:`bytes`\n A list of bytes values.\n \"\"\"\n pass\n\n def get_list_of_int1(self):\n \"\"\":obj:`list` of :obj:`int`: A list of integer values.\"\"\"\n pass\n\n def get_list_of_int2(self):\n \"\"\"A list of integer values.\n\n Returns\n -------\n :obj:`list` of :obj:`int`\n A list of integer values.\n \"\"\"\n pass\n\n def get_list_of_float1(self):\n \"\"\":obj:`list` of :obj:`float`: A list of floating-point values.\"\"\"\n pass\n\n def get_list_of_float2(self):\n \"\"\"A list of floating-point values.\n\n Returns\n -------\n :obj:`list` of :obj:`float`\n A list of floating-point values.\n \"\"\"\n pass\n\n def get_dict_of_bool1(self):\n \"\"\":obj:`dict` of :obj:`bool`: A dictionary of boolean values.\"\"\"\n pass\n\n def get_dict_of_bool2(self):\n \"\"\"A dictionary of boolean values.\n\n Returns\n -------\n :obj:`dict` of :obj:`bool`\n A dictionary of boolean values.\n \"\"\"\n pass\n\n def get_dict_of_str1(self):\n \"\"\":obj:`dict` of :obj:`str`: A dictionary of string values.\"\"\"\n pass\n\n def get_dict_of_str2(self):\n \"\"\"A 
dictionary of string values.\n\n Returns\n -------\n :obj:`dict` of :obj:`str`\n A dictionary of string values.\n \"\"\"\n pass\n\n def get_dict_of_bytes1(self):\n \"\"\":obj:`dict` of :obj:`bytes`: A dictionary of bytes values.\"\"\"\n pass\n\n def get_dict_of_bytes2(self):\n \"\"\"A dictionary of bytes values.\n\n Returns\n -------\n :obj:`dict` of :obj:`bytes`\n A dictionary of bytes values.\n \"\"\"\n pass\n\n def get_dict_of_int1(self):\n \"\"\":obj:`dict` of :obj:`int`: A dictionary of integer values.\"\"\"\n pass\n\n def get_dict_of_int2(self):\n \"\"\"A dictionary of integer values.\n\n Returns\n -------\n :obj:`dict` of :obj:`int`\n A dictionary of integer values.\n \"\"\"\n pass\n\n def get_dict_of_float1(self):\n \"\"\":obj:`dict` of :obj:`float`: A dictionary of floating-point values.\"\"\"\n pass\n\n def get_dict_of_float2(self):\n \"\"\"A dictionary of floating-point values.\n\n Returns\n -------\n :obj:`dict` of :obj:`float`\n A dictionary of floating-point values.\n \"\"\"\n pass\n\n def get_multiple1(self):\n \"\"\"Many different data types.\n\n Returns\n -------\n :obj:`str`\n A string value.\n :obj:`float`\n A floating-point value.\n :obj:`float`\n A floating-point value.\n :obj:`dict` of :obj:`int`\n A dictionary of integer values.\n :obj:`bytes`\n A bytes value.\n \"\"\"\n pass\n\n\ndef test_return_type_builtin():\n demo = ConnectionDemo(EquipmentRecord(), MyConnection)\n\n assert demo.get_none1() is None\n assert demo.get_none2() is None\n\n assert isinstance(demo.get_bool1(), bool)\n assert isinstance(demo.get_bool2(), bool)\n\n assert isinstance(demo.get_string1(), str)\n assert isinstance(demo.get_string2(), str)\n\n assert isinstance(demo.get_bytes1(), bytes)\n assert isinstance(demo.get_bytes2(), bytes)\n\n assert isinstance(demo.get_int1(), int)\n assert isinstance(demo.get_int2(), int)\n\n assert isinstance(demo.get_float1(), float)\n assert isinstance(demo.get_float2(), float)\n\n x = demo.get_list_of_bool1()\n assert isinstance(x, list) and isinstance(x[0], bool)\n\n x = demo.get_list_of_bool2()\n assert isinstance(x, list) and isinstance(x[0], bool)\n\n x = demo.get_list_of_str1()\n assert isinstance(x, list) and isinstance(x[0], str)\n\n x = demo.get_list_of_str2()\n assert isinstance(x, list) and isinstance(x[0], str)\n\n x = demo.get_list_of_bytes1()\n assert isinstance(x, list) and isinstance(x[0], bytes)\n\n x = demo.get_list_of_bytes2()\n assert isinstance(x, list) and isinstance(x[0], bytes)\n\n x = demo.get_list_of_int1()\n assert isinstance(x, list) and isinstance(x[0], int)\n\n x = demo.get_list_of_int2()\n assert isinstance(x, list) and isinstance(x[0], int)\n\n x = demo.get_list_of_float1()\n assert isinstance(x, list) and isinstance(x[0], float)\n\n x = demo.get_list_of_float2()\n assert isinstance(x, list) and isinstance(x[0], float)\n\n x = demo.get_dict_of_bool1()\n assert isinstance(x, dict) and isinstance(x['demo'], bool)\n\n x = demo.get_dict_of_bool2()\n assert isinstance(x, dict) and isinstance(x['demo'], bool)\n\n x = demo.get_dict_of_str1()\n assert isinstance(x, dict) and isinstance(x['demo'], str)\n\n x = demo.get_dict_of_str2()\n assert isinstance(x, dict) and isinstance(x['demo'], str)\n\n x = demo.get_dict_of_bytes1()\n assert isinstance(x, dict) and isinstance(x['demo'], bytes)\n\n x = demo.get_dict_of_bytes2()\n assert isinstance(x, dict) and isinstance(x['demo'], bytes)\n\n x = demo.get_dict_of_int1()\n assert isinstance(x, dict) and isinstance(x['demo'], int)\n\n x = demo.get_dict_of_int2()\n assert isinstance(x, dict) and 
isinstance(x['demo'], int)\n\n x = demo.get_dict_of_float1()\n assert isinstance(x, dict) and isinstance(x['demo'], float)\n\n x = demo.get_dict_of_float2()\n assert isinstance(x, dict) and isinstance(x['demo'], float)\n\n x = demo.get_multiple1()\n assert len(x) == 5\n assert isinstance(x[0], str)\n assert isinstance(x[1], float)\n assert isinstance(x[2], float)\n assert isinstance(x[3], dict) and isinstance(x[3]['demo'], int)\n assert isinstance(x[4], bytes)\n\n\ndef test_return_type_object():\n scope = ConnectionDemo(EquipmentRecord(), PicoScope)\n\n x = scope.channel()\n assert isinstance(x, dict) and x['demo'] == PicoScopeChannel\n",
"step-ids": [
19,
20,
21,
37,
39
]
}
|
[
19,
20,
21,
37,
39
] |
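The MyConnection / ConnectionDemo test code in the record above checks that demo return values are derived from the ":obj:`...`" markers in each method's docstring. Purely as an illustration of that convention (not the actual msl-equipment implementation), a toy parser could look like the sketch below; the helper name demo_value and the sample values are assumptions.

import re

# Sample values per builtin type name; illustrative only.
_SAMPLES = {"bool": True, "str": "demo", "bytes": b"demo", "int": 0, "float": 0.0}


def demo_value(docstring):
    """Toy docstring-driven demo value: map the first ':obj:`...`' types to a sample."""
    types = re.findall(r":obj:`(\w+)`", docstring or "")
    if not types:
        return None
    if types[0] == "list":
        return [_SAMPLES[types[1]]] if len(types) > 1 else []
    if types[0] == "dict":
        return {"demo": _SAMPLES[types[1]]} if len(types) > 1 else {}
    return _SAMPLES.get(types[0])


print(demo_value(":obj:`list` of :obj:`int`: A list of integer values."))   # [0]
print(demo_value(":obj:`dict` of :obj:`float`: A dictionary of floats."))   # {'demo': 0.0}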
<|reserved_special_token_0|>
class PG_Agent(object):
def __init__(self, env, policy: torch.nn.modules.container.Sequential,
learning_rate: float, n_policy: int, n_episode: int, max_timesteps: int
) ->None:
super().__init__()
self.env = env
self.policy = policy
self.learning_rate = learning_rate
self.n_policy = n_policy
self.n_episode = n_episode
self.max_timesteps = max_timesteps
self.optimizer = optim.Adam(self.policy.parameters(), lr=self.
learning_rate)
def get_acs(self, obs):
"""
obs is shape (batch_size, n_dim)
"""
logits = self.policy(obs)
acs = torch.argmax(logits, dim=1)
return acs
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def train(self):
"""
        for _ in number_of_policy_iterations:
            episodes can have different numbers of timesteps, so the batch size is effectively 1 and each episode is processed on its own
            # which also means no replay buffer is needed
            roll out episodes with the current policy
            read the rollout data
            use them to compute the surrogate objective J tilde as the loss
            take its gradient
            apply the optimizer update
"""
for i_policy in range(self.n_policy):
J = 0
q = 0
for i_episode in range(self.n_episode):
obs, acs, next_obs, res, terminals = self.generate_episode()
assert len(obs) == len(next_obs) == len(res) == len(acs
) == len(terminals)
r_tau = sum(res)
logits = self.policy(obs)
criterion = nn.CrossEntropyLoss(reduction='sum')
negative_likelihoods = criterion(logits, acs)
negative_likelihoods = negative_likelihoods.sum()
J += negative_likelihoods * r_tau
q += res.sum().item()
J /= self.n_episode
self.optimizer.zero_grad()
print(
                f'policy {i_policy}: loss (J tilde) = {J.item()}, avg return >= {q / self.n_episode}'
)
J.backward()
self.optimizer.step()
def save_policy(self, path='policy.pth'):
torch.save(self.policy, path)
def load_policy(self, path='policy.pth'):
self.policy = torch.load(path)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PG_Agent(object):
def __init__(self, env, policy: torch.nn.modules.container.Sequential,
learning_rate: float, n_policy: int, n_episode: int, max_timesteps: int
) ->None:
super().__init__()
self.env = env
self.policy = policy
self.learning_rate = learning_rate
self.n_policy = n_policy
self.n_episode = n_episode
self.max_timesteps = max_timesteps
self.optimizer = optim.Adam(self.policy.parameters(), lr=self.
learning_rate)
def get_acs(self, obs):
"""
obs is shape (batch_size, n_dim)
"""
logits = self.policy(obs)
acs = torch.argmax(logits, dim=1)
return acs
def get_ac(self, ob):
"""
ob is shape (n_dim,)
"""
if isinstance(ob, np.ndarray):
ob = torch.from_numpy(ob.astype(gv.np_default_type))
logits = self.policy(ob.view(1, -1))
distri = distributions.Categorical(logits=logits)
return distri.sample().item()
<|reserved_special_token_0|>
def train(self):
"""
        for _ in number_of_policy_iterations:
            episodes can have different numbers of timesteps, so the batch size is effectively 1 and each episode is processed on its own
            # which also means no replay buffer is needed
            roll out episodes with the current policy
            read the rollout data
            use them to compute the surrogate objective J tilde as the loss
            take its gradient
            apply the optimizer update
"""
for i_policy in range(self.n_policy):
J = 0
q = 0
for i_episode in range(self.n_episode):
obs, acs, next_obs, res, terminals = self.generate_episode()
assert len(obs) == len(next_obs) == len(res) == len(acs
) == len(terminals)
r_tau = sum(res)
logits = self.policy(obs)
criterion = nn.CrossEntropyLoss(reduction='sum')
negative_likelihoods = criterion(logits, acs)
negative_likelihoods = negative_likelihoods.sum()
J += negative_likelihoods * r_tau
q += res.sum().item()
J /= self.n_episode
self.optimizer.zero_grad()
print(
                f'policy {i_policy}: loss (J tilde) = {J.item()}, avg return >= {q / self.n_episode}'
)
J.backward()
self.optimizer.step()
def save_policy(self, path='policy.pth'):
torch.save(self.policy, path)
def load_policy(self, path='policy.pth'):
self.policy = torch.load(path)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PG_Agent(object):
def __init__(self, env, policy: torch.nn.modules.container.Sequential,
learning_rate: float, n_policy: int, n_episode: int, max_timesteps: int
) ->None:
super().__init__()
self.env = env
self.policy = policy
self.learning_rate = learning_rate
self.n_policy = n_policy
self.n_episode = n_episode
self.max_timesteps = max_timesteps
self.optimizer = optim.Adam(self.policy.parameters(), lr=self.
learning_rate)
def get_acs(self, obs):
"""
obs is shape (batch_size, n_dim)
"""
logits = self.policy(obs)
acs = torch.argmax(logits, dim=1)
return acs
def get_ac(self, ob):
"""
ob is shape (n_dim,)
"""
if isinstance(ob, np.ndarray):
ob = torch.from_numpy(ob.astype(gv.np_default_type))
logits = self.policy(ob.view(1, -1))
distri = distributions.Categorical(logits=logits)
return distri.sample().item()
def generate_episode(self, render=False):
next_ob = self.env.reset().reshape(1, -1)
if render:
self.env.render()
timesteps = 0
obs = []
acs = []
next_obs = []
res = []
terminals = []
while True:
ob = next_ob
ac = self.get_ac(ob)
next_ob, re, done, info = self.env.step(ac)
if render:
self.env.render()
next_ob = next_ob.reshape(1, -1)
obs.append(ob)
acs.append(ac)
next_obs.append(next_ob)
res.append(re)
terminals.append(done)
if done or timesteps > self.max_timesteps:
break
return torch.from_numpy(np.concatenate(obs).astype(gv.np_default_type)
), torch.tensor(acs), torch.from_numpy(np.concatenate(next_obs)
), torch.tensor(res), torch.tensor(terminals)
def train(self):
"""
        for _ in number_of_policy_iterations:
            episodes can have different numbers of timesteps, so the batch size is effectively 1 and each episode is processed on its own
            # which also means no replay buffer is needed
            roll out episodes with the current policy
            read the rollout data
            use them to compute the surrogate objective J tilde as the loss
            take its gradient
            apply the optimizer update
"""
for i_policy in range(self.n_policy):
J = 0
q = 0
for i_episode in range(self.n_episode):
obs, acs, next_obs, res, terminals = self.generate_episode()
assert len(obs) == len(next_obs) == len(res) == len(acs
) == len(terminals)
r_tau = sum(res)
logits = self.policy(obs)
criterion = nn.CrossEntropyLoss(reduction='sum')
negative_likelihoods = criterion(logits, acs)
negative_likelihoods = negative_likelihoods.sum()
J += negative_likelihoods * r_tau
q += res.sum().item()
J /= self.n_episode
self.optimizer.zero_grad()
print(
                f'policy {i_policy}: loss (J tilde) = {J.item()}, avg return >= {q / self.n_episode}'
)
J.backward()
self.optimizer.step()
def save_policy(self, path='policy.pth'):
torch.save(self.policy, path)
def load_policy(self, path='policy.pth'):
self.policy = torch.load(path)
<|reserved_special_token_1|>
from os import path
import torch
import torch.nn as nn
import torch.optim as optim
import torch.distributions as distributions
import numpy as np
from torch.serialization import load
import global_var as gv
torch.set_default_dtype(gv.torch_default_type)
class PG_Agent(object):
def __init__(self, env, policy: torch.nn.modules.container.Sequential,
learning_rate: float, n_policy: int, n_episode: int, max_timesteps: int
) ->None:
super().__init__()
self.env = env
self.policy = policy
self.learning_rate = learning_rate
self.n_policy = n_policy
self.n_episode = n_episode
self.max_timesteps = max_timesteps
self.optimizer = optim.Adam(self.policy.parameters(), lr=self.
learning_rate)
def get_acs(self, obs):
"""
obs is shape (batch_size, n_dim)
"""
logits = self.policy(obs)
acs = torch.argmax(logits, dim=1)
return acs
def get_ac(self, ob):
"""
ob is shape (n_dim,)
"""
if isinstance(ob, np.ndarray):
ob = torch.from_numpy(ob.astype(gv.np_default_type))
logits = self.policy(ob.view(1, -1))
distri = distributions.Categorical(logits=logits)
return distri.sample().item()
def generate_episode(self, render=False):
next_ob = self.env.reset().reshape(1, -1)
if render:
self.env.render()
timesteps = 0
obs = []
acs = []
next_obs = []
res = []
terminals = []
while True:
ob = next_ob
ac = self.get_ac(ob)
next_ob, re, done, info = self.env.step(ac)
if render:
self.env.render()
next_ob = next_ob.reshape(1, -1)
obs.append(ob)
acs.append(ac)
next_obs.append(next_ob)
res.append(re)
terminals.append(done)
if done or timesteps > self.max_timesteps:
break
return torch.from_numpy(np.concatenate(obs).astype(gv.np_default_type)
), torch.tensor(acs), torch.from_numpy(np.concatenate(next_obs)
), torch.tensor(res), torch.tensor(terminals)
def train(self):
"""
        for _ in number_of_policy_iterations:
            episodes can have different numbers of timesteps, so the batch size is effectively 1 and each episode is processed on its own
            # which also means no replay buffer is needed
            roll out episodes with the current policy
            read the rollout data
            use them to compute the surrogate objective J tilde as the loss
            take its gradient
            apply the optimizer update
"""
for i_policy in range(self.n_policy):
J = 0
q = 0
for i_episode in range(self.n_episode):
obs, acs, next_obs, res, terminals = self.generate_episode()
assert len(obs) == len(next_obs) == len(res) == len(acs
) == len(terminals)
r_tau = sum(res)
logits = self.policy(obs)
criterion = nn.CrossEntropyLoss(reduction='sum')
negative_likelihoods = criterion(logits, acs)
negative_likelihoods = negative_likelihoods.sum()
J += negative_likelihoods * r_tau
q += res.sum().item()
J /= self.n_episode
self.optimizer.zero_grad()
print(
                f'policy {i_policy}: loss (J tilde) = {J.item()}, avg return >= {q / self.n_episode}'
)
J.backward()
self.optimizer.step()
def save_policy(self, path='policy.pth'):
torch.save(self.policy, path)
def load_policy(self, path='policy.pth'):
self.policy = torch.load(path)
<|reserved_special_token_1|>
# Orchestrates the rollout data and the policy
from os import path
import torch
import torch.nn as nn
import torch.optim as optim
import torch.distributions as distributions
import numpy as np
from torch.serialization import load
import global_var as gv
torch.set_default_dtype(gv.torch_default_type)
class PG_Agent(object):
def __init__(
self,
env,
policy: torch.nn.modules.container.Sequential,
learning_rate: float,
        n_policy: int, # how many policy iterations to run
        n_episode: int, # how many episodes to collect under each policy to update it
        max_timesteps: int # cap on steps per episode, so an episode cannot run forever once the policy becomes strong
) -> None:
super().__init__()
self.env = env
self.policy = policy
self.learning_rate = learning_rate
# self.buffer = buffer
self.n_policy = n_policy
self.n_episode = n_episode
self.max_timesteps = max_timesteps
self.optimizer = optim.Adam(self.policy.parameters(), lr=self.learning_rate)
def get_acs(self, obs):
'''
obs is shape (batch_size, n_dim)
'''
logits = self.policy(obs)
acs = torch.argmax(logits, dim=1)
return acs # shape (batch_size,)
def get_ac(self, ob):
'''
ob is shape (n_dim,)
'''
if isinstance(ob, np.ndarray):
ob = torch.from_numpy(ob.astype(gv.np_default_type))
logits = self.policy(ob.view(1,-1))
        # sample the action from the categorical distribution instead of taking the argmax of the logits; this took a long debugging session to get right
# ac = torch.argmax(logits)
distri = distributions.Categorical(logits=logits)
return distri.sample().item()
def generate_episode(self, render = False):
next_ob = self.env.reset().reshape(1,-1)
if render:
self.env.render()
timesteps = 0
obs = []
acs = []
next_obs = []
res = []
terminals = []
while True:
ob = next_ob
ac = self.get_ac(ob)
next_ob, re, done, info = self.env.step(ac)
if render:
self.env.render()
next_ob = next_ob.reshape(1,-1)
obs.append(ob)
acs.append(ac)
next_obs.append(next_ob)
res.append(re)
terminals.append(done)
            timesteps += 1  # count the step so that the max_timesteps cap can actually trigger
            # break
            if done or timesteps > self.max_timesteps:
break
# print(acs, type(acs), 'acs')
return torch.from_numpy(np.concatenate(obs).astype(gv.np_default_type)), torch.tensor(acs), torch.from_numpy(np.concatenate(next_obs)), torch.tensor(res), torch.tensor(terminals)
def train(self):
'''
        for _ in number_of_policy_iterations:
            episodes can have different numbers of timesteps, so the batch size is effectively 1 and each episode is processed on its own
            # which also means no replay buffer is needed
            roll out episodes with the current policy
            read the rollout data
            use them to compute the surrogate objective J tilde as the loss
            take its gradient
            apply the optimizer update
'''
# print(self.policy.state_dict(), 'p1')
for i_policy in range(self.n_policy):
            J = 0 # J tilde, i.e. the loss
q = 0
for i_episode in range(self.n_episode):
                # roll out one episode with the current policy
obs, acs, next_obs, res, terminals = self.generate_episode()
# print(acs, acs.shape, 'acs')
assert(len(obs)==len(next_obs)==len(res)==len(acs)==len(terminals))
r_tau = sum(res)
logits = self.policy(obs)
# print(logits, logits.shape, 'logits')
# print(acs, type(acs))
                criterion = nn.CrossEntropyLoss(reduction='sum') # reduction must be 'sum'; with the default 'mean' the expression no longer matches the policy-gradient formula and training makes no progress (this took a long time to track down)
negative_likelihoods = criterion(logits, acs)
# print(negative_likelihoods, negative_likelihoods.shape, 'negative_likelihoods')
negative_likelihoods = negative_likelihoods.sum()
# print(negative_likelihoods, negative_likelihoods.shape, 'negative_likelihoods')
# print(r_tau, 'r_tau')
J += negative_likelihoods*r_tau
q += res.sum().item()
J /= self.n_episode
self.optimizer.zero_grad()
            print(f"policy {i_policy}: loss (J tilde) = {J.item()}, avg return >= {q/self.n_episode}") # this printed loss is only a rough estimate; a per-step average would be more informative
J.backward()
self.optimizer.step()
# print(self.policy.state_dict(), 'p2')
def save_policy(self, path='policy.pth'):
torch.save(self.policy, path)
def load_policy(self, path='policy.pth'):
self.policy = torch.load(path)
|
flexible
|
{
"blob_id": "b2cfd397e48213a540608fc232db2eab282935bb",
"index": 1481,
"step-1": "<mask token>\n\n\nclass PG_Agent(object):\n\n def __init__(self, env, policy: torch.nn.modules.container.Sequential,\n learning_rate: float, n_policy: int, n_episode: int, max_timesteps: int\n ) ->None:\n super().__init__()\n self.env = env\n self.policy = policy\n self.learning_rate = learning_rate\n self.n_policy = n_policy\n self.n_episode = n_episode\n self.max_timesteps = max_timesteps\n self.optimizer = optim.Adam(self.policy.parameters(), lr=self.\n learning_rate)\n\n def get_acs(self, obs):\n \"\"\"\n obs is shape (batch_size, n_dim)\n \"\"\"\n logits = self.policy(obs)\n acs = torch.argmax(logits, dim=1)\n return acs\n <mask token>\n <mask token>\n\n def train(self):\n \"\"\"\n for _ in 轮数:\n 由于不知道如何处理不同的episode的timesteps不一样的问题,所以设置batch_size为1,每次只处理一个episode\n # 那么也不需要buffer了\n 按照既有策略生成buffer\n 从buffer中获取数据\n\n 利用loss计算j tilder\n 求梯度\n 更新loss\n\n \"\"\"\n for i_policy in range(self.n_policy):\n J = 0\n q = 0\n for i_episode in range(self.n_episode):\n obs, acs, next_obs, res, terminals = self.generate_episode()\n assert len(obs) == len(next_obs) == len(res) == len(acs\n ) == len(terminals)\n r_tau = sum(res)\n logits = self.policy(obs)\n criterion = nn.CrossEntropyLoss(reduction='sum')\n negative_likelihoods = criterion(logits, acs)\n negative_likelihoods = negative_likelihoods.sum()\n J += negative_likelihoods * r_tau\n q += res.sum().item()\n J /= self.n_episode\n self.optimizer.zero_grad()\n print(\n f'第{i_policy}个策略的loss J tilda 为 {J.item()}, avg return >= {q / self.n_episode}'\n )\n J.backward()\n self.optimizer.step()\n\n def save_policy(self, path='policy.pth'):\n torch.save(self.policy, path)\n\n def load_policy(self, path='policy.pth'):\n self.policy = torch.load(path)\n",
"step-2": "<mask token>\n\n\nclass PG_Agent(object):\n\n def __init__(self, env, policy: torch.nn.modules.container.Sequential,\n learning_rate: float, n_policy: int, n_episode: int, max_timesteps: int\n ) ->None:\n super().__init__()\n self.env = env\n self.policy = policy\n self.learning_rate = learning_rate\n self.n_policy = n_policy\n self.n_episode = n_episode\n self.max_timesteps = max_timesteps\n self.optimizer = optim.Adam(self.policy.parameters(), lr=self.\n learning_rate)\n\n def get_acs(self, obs):\n \"\"\"\n obs is shape (batch_size, n_dim)\n \"\"\"\n logits = self.policy(obs)\n acs = torch.argmax(logits, dim=1)\n return acs\n\n def get_ac(self, ob):\n \"\"\"\n ob is shape (n_dim,)\n \"\"\"\n if isinstance(ob, np.ndarray):\n ob = torch.from_numpy(ob.astype(gv.np_default_type))\n logits = self.policy(ob.view(1, -1))\n distri = distributions.Categorical(logits=logits)\n return distri.sample().item()\n <mask token>\n\n def train(self):\n \"\"\"\n for _ in 轮数:\n 由于不知道如何处理不同的episode的timesteps不一样的问题,所以设置batch_size为1,每次只处理一个episode\n # 那么也不需要buffer了\n 按照既有策略生成buffer\n 从buffer中获取数据\n\n 利用loss计算j tilder\n 求梯度\n 更新loss\n\n \"\"\"\n for i_policy in range(self.n_policy):\n J = 0\n q = 0\n for i_episode in range(self.n_episode):\n obs, acs, next_obs, res, terminals = self.generate_episode()\n assert len(obs) == len(next_obs) == len(res) == len(acs\n ) == len(terminals)\n r_tau = sum(res)\n logits = self.policy(obs)\n criterion = nn.CrossEntropyLoss(reduction='sum')\n negative_likelihoods = criterion(logits, acs)\n negative_likelihoods = negative_likelihoods.sum()\n J += negative_likelihoods * r_tau\n q += res.sum().item()\n J /= self.n_episode\n self.optimizer.zero_grad()\n print(\n f'第{i_policy}个策略的loss J tilda 为 {J.item()}, avg return >= {q / self.n_episode}'\n )\n J.backward()\n self.optimizer.step()\n\n def save_policy(self, path='policy.pth'):\n torch.save(self.policy, path)\n\n def load_policy(self, path='policy.pth'):\n self.policy = torch.load(path)\n",
"step-3": "<mask token>\n\n\nclass PG_Agent(object):\n\n def __init__(self, env, policy: torch.nn.modules.container.Sequential,\n learning_rate: float, n_policy: int, n_episode: int, max_timesteps: int\n ) ->None:\n super().__init__()\n self.env = env\n self.policy = policy\n self.learning_rate = learning_rate\n self.n_policy = n_policy\n self.n_episode = n_episode\n self.max_timesteps = max_timesteps\n self.optimizer = optim.Adam(self.policy.parameters(), lr=self.\n learning_rate)\n\n def get_acs(self, obs):\n \"\"\"\n obs is shape (batch_size, n_dim)\n \"\"\"\n logits = self.policy(obs)\n acs = torch.argmax(logits, dim=1)\n return acs\n\n def get_ac(self, ob):\n \"\"\"\n ob is shape (n_dim,)\n \"\"\"\n if isinstance(ob, np.ndarray):\n ob = torch.from_numpy(ob.astype(gv.np_default_type))\n logits = self.policy(ob.view(1, -1))\n distri = distributions.Categorical(logits=logits)\n return distri.sample().item()\n\n def generate_episode(self, render=False):\n next_ob = self.env.reset().reshape(1, -1)\n if render:\n self.env.render()\n timesteps = 0\n obs = []\n acs = []\n next_obs = []\n res = []\n terminals = []\n while True:\n ob = next_ob\n ac = self.get_ac(ob)\n next_ob, re, done, info = self.env.step(ac)\n if render:\n self.env.render()\n next_ob = next_ob.reshape(1, -1)\n obs.append(ob)\n acs.append(ac)\n next_obs.append(next_ob)\n res.append(re)\n terminals.append(done)\n if done or timesteps > self.max_timesteps:\n break\n return torch.from_numpy(np.concatenate(obs).astype(gv.np_default_type)\n ), torch.tensor(acs), torch.from_numpy(np.concatenate(next_obs)\n ), torch.tensor(res), torch.tensor(terminals)\n\n def train(self):\n \"\"\"\n for _ in 轮数:\n 由于不知道如何处理不同的episode的timesteps不一样的问题,所以设置batch_size为1,每次只处理一个episode\n # 那么也不需要buffer了\n 按照既有策略生成buffer\n 从buffer中获取数据\n\n 利用loss计算j tilder\n 求梯度\n 更新loss\n\n \"\"\"\n for i_policy in range(self.n_policy):\n J = 0\n q = 0\n for i_episode in range(self.n_episode):\n obs, acs, next_obs, res, terminals = self.generate_episode()\n assert len(obs) == len(next_obs) == len(res) == len(acs\n ) == len(terminals)\n r_tau = sum(res)\n logits = self.policy(obs)\n criterion = nn.CrossEntropyLoss(reduction='sum')\n negative_likelihoods = criterion(logits, acs)\n negative_likelihoods = negative_likelihoods.sum()\n J += negative_likelihoods * r_tau\n q += res.sum().item()\n J /= self.n_episode\n self.optimizer.zero_grad()\n print(\n f'第{i_policy}个策略的loss J tilda 为 {J.item()}, avg return >= {q / self.n_episode}'\n )\n J.backward()\n self.optimizer.step()\n\n def save_policy(self, path='policy.pth'):\n torch.save(self.policy, path)\n\n def load_policy(self, path='policy.pth'):\n self.policy = torch.load(path)\n",
"step-4": "from os import path\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.distributions as distributions\nimport numpy as np\nfrom torch.serialization import load\nimport global_var as gv\ntorch.set_default_dtype(gv.torch_default_type)\n\n\nclass PG_Agent(object):\n\n def __init__(self, env, policy: torch.nn.modules.container.Sequential,\n learning_rate: float, n_policy: int, n_episode: int, max_timesteps: int\n ) ->None:\n super().__init__()\n self.env = env\n self.policy = policy\n self.learning_rate = learning_rate\n self.n_policy = n_policy\n self.n_episode = n_episode\n self.max_timesteps = max_timesteps\n self.optimizer = optim.Adam(self.policy.parameters(), lr=self.\n learning_rate)\n\n def get_acs(self, obs):\n \"\"\"\n obs is shape (batch_size, n_dim)\n \"\"\"\n logits = self.policy(obs)\n acs = torch.argmax(logits, dim=1)\n return acs\n\n def get_ac(self, ob):\n \"\"\"\n ob is shape (n_dim,)\n \"\"\"\n if isinstance(ob, np.ndarray):\n ob = torch.from_numpy(ob.astype(gv.np_default_type))\n logits = self.policy(ob.view(1, -1))\n distri = distributions.Categorical(logits=logits)\n return distri.sample().item()\n\n def generate_episode(self, render=False):\n next_ob = self.env.reset().reshape(1, -1)\n if render:\n self.env.render()\n timesteps = 0\n obs = []\n acs = []\n next_obs = []\n res = []\n terminals = []\n while True:\n ob = next_ob\n ac = self.get_ac(ob)\n next_ob, re, done, info = self.env.step(ac)\n if render:\n self.env.render()\n next_ob = next_ob.reshape(1, -1)\n obs.append(ob)\n acs.append(ac)\n next_obs.append(next_ob)\n res.append(re)\n terminals.append(done)\n if done or timesteps > self.max_timesteps:\n break\n return torch.from_numpy(np.concatenate(obs).astype(gv.np_default_type)\n ), torch.tensor(acs), torch.from_numpy(np.concatenate(next_obs)\n ), torch.tensor(res), torch.tensor(terminals)\n\n def train(self):\n \"\"\"\n for _ in 轮数:\n 由于不知道如何处理不同的episode的timesteps不一样的问题,所以设置batch_size为1,每次只处理一个episode\n # 那么也不需要buffer了\n 按照既有策略生成buffer\n 从buffer中获取数据\n\n 利用loss计算j tilder\n 求梯度\n 更新loss\n\n \"\"\"\n for i_policy in range(self.n_policy):\n J = 0\n q = 0\n for i_episode in range(self.n_episode):\n obs, acs, next_obs, res, terminals = self.generate_episode()\n assert len(obs) == len(next_obs) == len(res) == len(acs\n ) == len(terminals)\n r_tau = sum(res)\n logits = self.policy(obs)\n criterion = nn.CrossEntropyLoss(reduction='sum')\n negative_likelihoods = criterion(logits, acs)\n negative_likelihoods = negative_likelihoods.sum()\n J += negative_likelihoods * r_tau\n q += res.sum().item()\n J /= self.n_episode\n self.optimizer.zero_grad()\n print(\n f'第{i_policy}个策略的loss J tilda 为 {J.item()}, avg return >= {q / self.n_episode}'\n )\n J.backward()\n self.optimizer.step()\n\n def save_policy(self, path='policy.pth'):\n torch.save(self.policy, path)\n\n def load_policy(self, path='policy.pth'):\n self.policy = torch.load(path)\n",
"step-5": "# 总管buffer和policy\n\n\n\nfrom os import path\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.distributions as distributions\nimport numpy as np\nfrom torch.serialization import load\n\nimport global_var as gv\n\ntorch.set_default_dtype(gv.torch_default_type)\n\nclass PG_Agent(object):\n def __init__(\n self,\n env,\n policy: torch.nn.modules.container.Sequential, \n learning_rate: float,\n n_policy: int, # 迭代多少个策略\n n_episode: int, # 每个策略下输出多少个episode用来更新该策略\n max_timesteps: int # 最多一个episode多个步,免得一个很强的策略出来以后episode不终止了\n ) -> None:\n super().__init__()\n self.env = env\n self.policy = policy\n self.learning_rate = learning_rate\n # self.buffer = buffer\n self.n_policy = n_policy\n self.n_episode = n_episode\n self.max_timesteps = max_timesteps\n\n self.optimizer = optim.Adam(self.policy.parameters(), lr=self.learning_rate)\n\n def get_acs(self, obs):\n '''\n obs is shape (batch_size, n_dim)\n '''\n logits = self.policy(obs)\n acs = torch.argmax(logits, dim=1)\n return acs # shape (batch_size,)\n \n def get_ac(self, ob):\n '''\n ob is shape (n_dim,)\n '''\n if isinstance(ob, np.ndarray):\n ob = torch.from_numpy(ob.astype(gv.np_default_type))\n logits = self.policy(ob.view(1,-1))\n # 按照概率分布来获取ac,而不是直接取较大Logit者,这里dubug了好久,烦烦烦\n # ac = torch.argmax(logits)\n distri = distributions.Categorical(logits=logits)\n\n return distri.sample().item()\n\n def generate_episode(self, render = False):\n next_ob = self.env.reset().reshape(1,-1)\n if render:\n self.env.render()\n timesteps = 0\n obs = []\n acs = []\n next_obs = []\n res = []\n terminals = []\n while True:\n ob = next_ob\n ac = self.get_ac(ob)\n next_ob, re, done, info = self.env.step(ac)\n if render:\n self.env.render()\n next_ob = next_ob.reshape(1,-1)\n obs.append(ob)\n acs.append(ac)\n next_obs.append(next_ob)\n res.append(re)\n terminals.append(done)\n # break\n if done or timesteps > self.max_timesteps:\n break\n # print(acs, type(acs), 'acs')\n return torch.from_numpy(np.concatenate(obs).astype(gv.np_default_type)), torch.tensor(acs), torch.from_numpy(np.concatenate(next_obs)), torch.tensor(res), torch.tensor(terminals)\n\n\n def train(self):\n '''\n for _ in 轮数:\n 由于不知道如何处理不同的episode的timesteps不一样的问题,所以设置batch_size为1,每次只处理一个episode\n # 那么也不需要buffer了\n 按照既有策略生成buffer\n 从buffer中获取数据\n\n 利用loss计算j tilder\n 求梯度\n 更新loss\n\n '''\n # print(self.policy.state_dict(), 'p1')\n for i_policy in range(self.n_policy):\n J = 0 # j tilda,也就是loss\n q = 0\n for i_episode in range(self.n_episode):\n # 生成\n obs, acs, next_obs, res, terminals = self.generate_episode()\n # print(acs, acs.shape, 'acs')\n assert(len(obs)==len(next_obs)==len(res)==len(acs)==len(terminals))\n r_tau = sum(res)\n logits = self.policy(obs)\n\n # print(logits, logits.shape, 'logits')\n # print(acs, type(acs))\n\n criterion = nn.CrossEntropyLoss(reduction='sum') # 注意这里要选择sum才对,否则和policy gradient的公式并不一样,导致训练一直没有效果,难受啊,找了好久这个问题\n negative_likelihoods = criterion(logits, acs)\n # print(negative_likelihoods, negative_likelihoods.shape, 'negative_likelihoods')\n negative_likelihoods = negative_likelihoods.sum()\n # print(negative_likelihoods, negative_likelihoods.shape, 'negative_likelihoods')\n # print(r_tau, 'r_tau')\n J += negative_likelihoods*r_tau\n q += res.sum().item()\n \n J /= self.n_episode\n self.optimizer.zero_grad()\n print(f\"第{i_policy}个策略的loss J tilda 为 {J.item()}, avg return >= {q/self.n_episode}\") # 这里的loss估计不对,要用平均每次的\n J.backward()\n self.optimizer.step()\n\n # print(self.policy.state_dict(), 'p2')\n\n def save_policy(self, 
path='policy.pth'):\n torch.save(self.policy, path)\n\n def load_policy(self, path='policy.pth'):\n self.policy = torch.load(path)\n\n\n",
"step-ids": [
6,
7,
8,
10,
11
]
}
|
[
6,
7,
8,
10,
11
] |
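For context, the PG_Agent record above implements REINFORCE-style training: each policy update averages, over n_episode rollouts, the summed action cross-entropy weighted by the episode return, then takes one optimizer step. A minimal usage sketch follows; it assumes the classic gym step/reset API and a local global_var module providing the dtype settings the class relies on, and every name outside the record (environment id, layer sizes, hyperparameters, file name) is illustrative.

import gym
import torch.nn as nn

env = gym.make("CartPole-v1")                       # assumed: old-style gym API where reset() returns an ndarray
policy = nn.Sequential(
    nn.Linear(env.observation_space.shape[0], 64),  # 4 observation features for CartPole
    nn.Tanh(),
    nn.Linear(64, env.action_space.n),              # 2 action logits
)

agent = PG_Agent(
    env=env,
    policy=policy,
    learning_rate=1e-2,
    n_policy=100,        # policy iterations
    n_episode=10,        # episodes collected per iteration
    max_timesteps=500,   # per-episode step cap
)
agent.train()
agent.save_policy("cartpole_policy.pth")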
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BmExam(db.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BmExam(db.Model):
__tablename__ = 'bm_exam'
id = db.Column(db.Integer, primary_key=True)
status = db.Column(db.Integer, nullable=False, server_default=db.
FetchedValue())
exam_id = db.Column(db.Integer, nullable=False)
exam_name = db.Column(db.String(200), nullable=False, server_default=db
.FetchedValue())
show_exam_name = db.Column(db.String(200), nullable=False,
server_default=db.FetchedValue())
numbers = db.Column(db.Integer, nullable=False, server_default=db.
FetchedValue())
x_rules = db.Column(db.String(1000), nullable=False, server_default=db.
FetchedValue())
m_rules = db.Column(db.String(1000), nullable=False, server_default=db.
FetchedValue())
rule_status = db.Column(db.Integer, nullable=False, server_default=db.
FetchedValue())
start_time = db.Column(db.DateTime, nullable=False, server_default=db.
FetchedValue())
end_time = db.Column(db.DateTime, nullable=False, server_default=db.
FetchedValue())
beizhu = db.Column(db.String(2000), nullable=False, server_default=db.
FetchedValue())
beizhu2 = db.Column(db.String(200), nullable=False, server_default=db.
FetchedValue())
beizhu3 = db.Column(db.String(200), nullable=False, server_default=db.
FetchedValue())
updated_time = db.Column(db.DateTime, nullable=False, server_default=db
.FetchedValue())
created_time = db.Column(db.DateTime, nullable=False, server_default=db
.FetchedValue())
<|reserved_special_token_1|>
from sqlalchemy import Column, DateTime, Integer, String
from sqlalchemy.schema import FetchedValue
from application import db
class BmExam(db.Model):
__tablename__ = 'bm_exam'
id = db.Column(db.Integer, primary_key=True)
status = db.Column(db.Integer, nullable=False, server_default=db.
FetchedValue())
exam_id = db.Column(db.Integer, nullable=False)
exam_name = db.Column(db.String(200), nullable=False, server_default=db
.FetchedValue())
show_exam_name = db.Column(db.String(200), nullable=False,
server_default=db.FetchedValue())
numbers = db.Column(db.Integer, nullable=False, server_default=db.
FetchedValue())
x_rules = db.Column(db.String(1000), nullable=False, server_default=db.
FetchedValue())
m_rules = db.Column(db.String(1000), nullable=False, server_default=db.
FetchedValue())
rule_status = db.Column(db.Integer, nullable=False, server_default=db.
FetchedValue())
start_time = db.Column(db.DateTime, nullable=False, server_default=db.
FetchedValue())
end_time = db.Column(db.DateTime, nullable=False, server_default=db.
FetchedValue())
beizhu = db.Column(db.String(2000), nullable=False, server_default=db.
FetchedValue())
beizhu2 = db.Column(db.String(200), nullable=False, server_default=db.
FetchedValue())
beizhu3 = db.Column(db.String(200), nullable=False, server_default=db.
FetchedValue())
updated_time = db.Column(db.DateTime, nullable=False, server_default=db
.FetchedValue())
created_time = db.Column(db.DateTime, nullable=False, server_default=db
.FetchedValue())
<|reserved_special_token_1|>
# coding: utf-8
from sqlalchemy import Column, DateTime, Integer, String
from sqlalchemy.schema import FetchedValue
from application import db
class BmExam(db.Model):
__tablename__ = 'bm_exam'
id = db.Column(db.Integer, primary_key=True)
status = db.Column(db.Integer, nullable=False, server_default=db.FetchedValue())
exam_id = db.Column(db.Integer, nullable=False)
exam_name = db.Column(db.String(200), nullable=False, server_default=db.FetchedValue())
show_exam_name = db.Column(db.String(200), nullable=False, server_default=db.FetchedValue())
numbers = db.Column(db.Integer, nullable=False, server_default=db.FetchedValue())
x_rules = db.Column(db.String(1000), nullable=False, server_default=db.FetchedValue())
m_rules = db.Column(db.String(1000), nullable=False, server_default=db.FetchedValue())
rule_status = db.Column(db.Integer, nullable=False, server_default=db.FetchedValue())
start_time = db.Column(db.DateTime, nullable=False, server_default=db.FetchedValue())
end_time = db.Column(db.DateTime, nullable=False, server_default=db.FetchedValue())
beizhu = db.Column(db.String(2000), nullable=False, server_default=db.FetchedValue())
beizhu2 = db.Column(db.String(200), nullable=False, server_default=db.FetchedValue())
beizhu3 = db.Column(db.String(200), nullable=False, server_default=db.FetchedValue())
updated_time = db.Column(db.DateTime, nullable=False, server_default=db.FetchedValue())
created_time = db.Column(db.DateTime, nullable=False, server_default=db.FetchedValue())
|
flexible
|
{
"blob_id": "6be2cc99d03596715d76cda41d63b8c91c829498",
"index": 2211,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass BmExam(db.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass BmExam(db.Model):\n __tablename__ = 'bm_exam'\n id = db.Column(db.Integer, primary_key=True)\n status = db.Column(db.Integer, nullable=False, server_default=db.\n FetchedValue())\n exam_id = db.Column(db.Integer, nullable=False)\n exam_name = db.Column(db.String(200), nullable=False, server_default=db\n .FetchedValue())\n show_exam_name = db.Column(db.String(200), nullable=False,\n server_default=db.FetchedValue())\n numbers = db.Column(db.Integer, nullable=False, server_default=db.\n FetchedValue())\n x_rules = db.Column(db.String(1000), nullable=False, server_default=db.\n FetchedValue())\n m_rules = db.Column(db.String(1000), nullable=False, server_default=db.\n FetchedValue())\n rule_status = db.Column(db.Integer, nullable=False, server_default=db.\n FetchedValue())\n start_time = db.Column(db.DateTime, nullable=False, server_default=db.\n FetchedValue())\n end_time = db.Column(db.DateTime, nullable=False, server_default=db.\n FetchedValue())\n beizhu = db.Column(db.String(2000), nullable=False, server_default=db.\n FetchedValue())\n beizhu2 = db.Column(db.String(200), nullable=False, server_default=db.\n FetchedValue())\n beizhu3 = db.Column(db.String(200), nullable=False, server_default=db.\n FetchedValue())\n updated_time = db.Column(db.DateTime, nullable=False, server_default=db\n .FetchedValue())\n created_time = db.Column(db.DateTime, nullable=False, server_default=db\n .FetchedValue())\n",
"step-4": "from sqlalchemy import Column, DateTime, Integer, String\nfrom sqlalchemy.schema import FetchedValue\nfrom application import db\n\n\nclass BmExam(db.Model):\n __tablename__ = 'bm_exam'\n id = db.Column(db.Integer, primary_key=True)\n status = db.Column(db.Integer, nullable=False, server_default=db.\n FetchedValue())\n exam_id = db.Column(db.Integer, nullable=False)\n exam_name = db.Column(db.String(200), nullable=False, server_default=db\n .FetchedValue())\n show_exam_name = db.Column(db.String(200), nullable=False,\n server_default=db.FetchedValue())\n numbers = db.Column(db.Integer, nullable=False, server_default=db.\n FetchedValue())\n x_rules = db.Column(db.String(1000), nullable=False, server_default=db.\n FetchedValue())\n m_rules = db.Column(db.String(1000), nullable=False, server_default=db.\n FetchedValue())\n rule_status = db.Column(db.Integer, nullable=False, server_default=db.\n FetchedValue())\n start_time = db.Column(db.DateTime, nullable=False, server_default=db.\n FetchedValue())\n end_time = db.Column(db.DateTime, nullable=False, server_default=db.\n FetchedValue())\n beizhu = db.Column(db.String(2000), nullable=False, server_default=db.\n FetchedValue())\n beizhu2 = db.Column(db.String(200), nullable=False, server_default=db.\n FetchedValue())\n beizhu3 = db.Column(db.String(200), nullable=False, server_default=db.\n FetchedValue())\n updated_time = db.Column(db.DateTime, nullable=False, server_default=db\n .FetchedValue())\n created_time = db.Column(db.DateTime, nullable=False, server_default=db\n .FetchedValue())\n",
"step-5": "# coding: utf-8\nfrom sqlalchemy import Column, DateTime, Integer, String\nfrom sqlalchemy.schema import FetchedValue\nfrom application import db\n\n\nclass BmExam(db.Model):\n __tablename__ = 'bm_exam'\n\n id = db.Column(db.Integer, primary_key=True)\n status = db.Column(db.Integer, nullable=False, server_default=db.FetchedValue())\n exam_id = db.Column(db.Integer, nullable=False)\n exam_name = db.Column(db.String(200), nullable=False, server_default=db.FetchedValue())\n show_exam_name = db.Column(db.String(200), nullable=False, server_default=db.FetchedValue())\n numbers = db.Column(db.Integer, nullable=False, server_default=db.FetchedValue())\n x_rules = db.Column(db.String(1000), nullable=False, server_default=db.FetchedValue())\n m_rules = db.Column(db.String(1000), nullable=False, server_default=db.FetchedValue())\n rule_status = db.Column(db.Integer, nullable=False, server_default=db.FetchedValue())\n start_time = db.Column(db.DateTime, nullable=False, server_default=db.FetchedValue())\n end_time = db.Column(db.DateTime, nullable=False, server_default=db.FetchedValue())\n beizhu = db.Column(db.String(2000), nullable=False, server_default=db.FetchedValue())\n beizhu2 = db.Column(db.String(200), nullable=False, server_default=db.FetchedValue())\n beizhu3 = db.Column(db.String(200), nullable=False, server_default=db.FetchedValue())\n updated_time = db.Column(db.DateTime, nullable=False, server_default=db.FetchedValue())\n created_time = db.Column(db.DateTime, nullable=False, server_default=db.FetchedValue())\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
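The BmExam record above is a plain Flask-SQLAlchemy model for an exam signup table. A hedged query sketch follows; the meaning of status == 1 ("enabled") is an assumption, and BmExam and the db session come from the same application package shown in the model.

from datetime import datetime


def open_exams(now=None):
    """Exams whose signup window contains `now`; status == 1 is assumed to mean 'enabled'."""
    now = now or datetime.now()
    return (
        BmExam.query
        .filter(BmExam.status == 1)
        .filter(BmExam.start_time <= now, BmExam.end_time >= now)
        .order_by(BmExam.start_time.asc())
        .all()
    )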
"""Utilties to access a column and one field of a column if the column is composite."""
from typing import TYPE_CHECKING, Optional
from greenplumpython.db import Database
from greenplumpython.expr import Expr
from greenplumpython.type import DataType
if TYPE_CHECKING:
from greenplumpython.dataframe import DataFrame
class ColumnField(Expr):
"""
Inherited from :class:`~expr.Expr`.
Representation of a field of a :class:`~col.Column` of composite type. This
type allows to access to the fields in a dict-like manner.
"""
def __init__(
self,
column: "Column",
field_name: str,
) -> None:
# noqa
""":meta private:"""
self._field_name = field_name
self._column = column
super().__init__(column._dataframe)
def _serialize(self, db: Optional[Database] = None) -> str:
return (
f'({self._column._serialize(db=db)})."{self._field_name}"'
if self._field_name != "*"
else f"({self._column._serialize(db=db)}).*"
)
class Column(Expr):
"""
Inherited from :class:`~expr.Expr`.
Representation of a Python object :class:`~col.Column`.
"""
def __init__(self, name: str, dataframe: "DataFrame") -> None:
# noqa: D400
""":meta private:"""
super().__init__(dataframe=dataframe)
self._name = name
self._type: Optional[DataType] = None # TODO: Add type inference
def _serialize(self, db: Optional[Database] = None) -> str:
assert self._dataframe is not None
# Quote both dataframe name and column name to avoid SQL injection.
return (
f'{self._dataframe._name}."{self._name}"'
if self._name != "*"
else f"{self._dataframe._name}.*"
)
def __getitem__(self, field_name: str) -> ColumnField:
"""
Get access to a field of the current column.
Args:
field_name: str
Returns:
Field of the column with the specified name.
"""
return ColumnField(self, field_name=field_name)
def _bind(
self,
dataframe: Optional["DataFrame"] = None,
db: Optional[Database] = None,
):
# noqa D400
""":meta private:"""
c = Column(
self._name,
self._dataframe,
)
c._db = db if db is not None else dataframe._db if dataframe is not None else self._db
assert c._db is not None
return c
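To illustrate the dict-like access that ColumnField provides, a small sketch follows. It opens no database connection: _FakeDataFrame is a stand-in exposing only the attributes Column actually reads, and it assumes Expr.__init__ merely records the dataframe reference; both are assumptions for illustration, not part of greenplumpython's API.

class _FakeDataFrame:
    """Stand-in exposing just the attributes used during serialization."""

    def __init__(self, name):
        self._name = name   # qualifies column references as <name>."<column>"
        self._db = None


df = _FakeDataFrame("person")
address = Column("address", df)        # refers to person."address"
zip_code = address["zip_code"]         # field of the composite column
print(address._serialize())            # person."address"
print(zip_code._serialize())           # (person."address")."zip_code"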
from ED63RDScenarioHelper import *
def main():
SetCodePage("ms932")
CreateScenaFile(
FileName = 'C2219 ._SN',
MapName = 'Ruan',
Location = 'C2219.x',
MapIndex = 84,
MapDefaultBGM = "ed60015",
Flags = 0,
EntryFunctionIndex = 0xFFFF,
Reserved = 0,
IncludedScenario = [
'ED6_DT21/C2219 ._SN',
'',
'',
'',
'',
'',
'',
''
],
)
BuildStringList(
'@FileName', # 8
'Vogt', # 9
)
DeclEntryPoint(
Unknown_00 = 0,
Unknown_04 = 0,
Unknown_08 = 6000,
Unknown_0C = 4,
Unknown_0E = 0,
Unknown_10 = 0,
Unknown_14 = 9500,
Unknown_18 = -10000,
Unknown_1C = 0,
Unknown_20 = 0,
Unknown_24 = 0,
Unknown_28 = 2800,
Unknown_2C = 262,
Unknown_30 = 45,
Unknown_32 = 0,
Unknown_34 = 360,
Unknown_36 = 0,
Unknown_38 = 0,
Unknown_3A = 0,
InitScenaIndex = 0,
InitFunctionIndex = 0,
EntryScenaIndex = 0,
EntryFunctionIndex = 1,
)
AddCharChip(
'ED6_DT07/CH01000 ._CH', # 00
)
AddCharChipPat(
'ED6_DT07/CH01000P._CP', # 00
)
DeclNpc(
X = -2870,
Z = 0,
Y = 202000,
Direction = 270,
Unknown2 = 0,
Unknown3 = 0,
ChipIndex = 0x0,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 3,
)
ScpFunction(
"Function_0_D2", # 00, 0
"Function_1_D3", # 01, 1
"Function_2_DD", # 02, 2
"Function_3_25A", # 03, 3
"Function_4_AEC", # 04, 4
"Function_5_B4D", # 05, 5
)
def Function_0_D2(): pass
label("Function_0_D2")
Return()
# Function_0_D2 end
def Function_1_D3(): pass
label("Function_1_D3")
OP_B0(0x0, 0x78)
OP_1C(0x0, 0x0, 0x5)
Return()
# Function_1_D3 end
def Function_2_DD(): pass
label("Function_2_DD")
RunExpression(0x1, (scpexpr(EXPR_RAND), scpexpr(EXPR_PUSH_LONG, 0xE), scpexpr(EXPR_IMOD), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_102")
OP_99(0xFE, 0x0, 0x7, 0x672)
Jump("loc_244")
label("loc_102")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_11B")
OP_99(0xFE, 0x1, 0x7, 0x640)
Jump("loc_244")
label("loc_11B")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_134")
OP_99(0xFE, 0x2, 0x7, 0x60E)
Jump("loc_244")
label("loc_134")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_14D")
OP_99(0xFE, 0x3, 0x7, 0x5DC)
Jump("loc_244")
label("loc_14D")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x4), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_166")
OP_99(0xFE, 0x4, 0x7, 0x5AA)
Jump("loc_244")
label("loc_166")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x5), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_17F")
OP_99(0xFE, 0x5, 0x7, 0x578)
Jump("loc_244")
label("loc_17F")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x6), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_198")
OP_99(0xFE, 0x6, 0x7, 0x546)
Jump("loc_244")
label("loc_198")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x7), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_1B1")
OP_99(0xFE, 0x0, 0x7, 0x677)
Jump("loc_244")
label("loc_1B1")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x8), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_1CA")
OP_99(0xFE, 0x1, 0x7, 0x645)
Jump("loc_244")
label("loc_1CA")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x9), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_1E3")
OP_99(0xFE, 0x2, 0x7, 0x613)
Jump("loc_244")
label("loc_1E3")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0xA), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_1FC")
OP_99(0xFE, 0x3, 0x7, 0x5E1)
Jump("loc_244")
label("loc_1FC")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0xB), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_215")
OP_99(0xFE, 0x4, 0x7, 0x5AF)
Jump("loc_244")
label("loc_215")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0xC), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_22E")
OP_99(0xFE, 0x5, 0x7, 0x57D)
Jump("loc_244")
label("loc_22E")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0xD), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_244")
OP_99(0xFE, 0x6, 0x7, 0x54B)
label("loc_244")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_259")
OP_99(0xFE, 0x0, 0x7, 0x5DC)
Jump("loc_244")
label("loc_259")
Return()
# Function_2_DD end
def Function_3_25A(): pass
label("Function_3_25A")
TalkBegin(0xFE)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x5E4, 2)), scpexpr(EXPR_END)), "loc_6C4")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x5E8, 3)), scpexpr(EXPR_END)), "loc_34F")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_END)), "loc_2B2")
ChrTalk( #0
0xFE,
(
"I reckon my happiness is right here in this\x01",
"lighthouse.\x02",
)
)
CloseMessageWindow()
Jump("loc_34C")
label("loc_2B2")
ChrTalk( #1
0xFE,
(
"There's actually a shining stone here in this\x01",
"lighthouse, though, even if it's not what you\x01",
"are looking for.\x02",
)
)
CloseMessageWindow()
ChrTalk( #2
0xFE,
"I reckon that's my happiness...\x02",
)
CloseMessageWindow()
OP_A2(0x0)
label("loc_34C")
Jump("loc_6C1")
label("loc_34F")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x5E8, 4)), scpexpr(EXPR_END)), "loc_477")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_END)), "loc_3DF")
ChrTalk( #3
0xFE,
(
"There's no shame in relying on others for\x01",
"help if you need it! Grab 'em by the collar\x01",
"and scream for help if you need it!\x02",
)
)
CloseMessageWindow()
Jump("loc_474")
label("loc_3DF")
ChrTalk( #4
0xFE,
"You lookin' for some help, young lady?\x02",
)
CloseMessageWindow()
ChrTalk( #5
0xFE,
"What do you need?\x02",
)
CloseMessageWindow()
ChrTalk( #6
0x14E,
(
"#1714FN-No. I'll be fine, honestly...\x02\x03",
"#1713FThank you for offering, sir.\x02",
)
)
CloseMessageWindow()
OP_A2(0x0)
label("loc_474")
Jump("loc_6C1")
label("loc_477")
EventBegin(0x1)
OP_8C(0xFE, 270, 0)
Fade(1000)
OP_6D(-1600, 0, 202380, 0)
OP_67(0, 6000, -10000, 0)
OP_6B(3000, 0)
OP_6C(45000, 0)
OP_6E(280, 0)
SetChrPos(0x14E, -1280, 0, 202300, 270)
Sleep(1000)
ChrTalk( #7
0xFE,
(
"I swear, this is EXACTLY what's wrong\x01",
"with youngins these days...\x02",
)
)
CloseMessageWindow()
OP_62(0x10, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
Sleep(1000)
OP_8C(0xFE, 90, 500)
Sleep(500)
ChrTalk( #8
0xFE,
"Wh-What are you doing here, young lady?\x02",
)
CloseMessageWindow()
ChrTalk( #9
0x14E,
(
"#1712FU-Umm... Excuse me, sir...\x02\x03",
"You haven't seen a young girl other\x01",
"than me in here recently have you?\x02",
)
)
CloseMessageWindow()
ChrTalk( #10
0xFE,
"A young girl? 'Fraid not.\x02",
)
CloseMessageWindow()
ChrTalk( #11
0x14E,
(
"#1713FI-I see...\x02\x03",
"Sorry for troubling you...\x02",
)
)
CloseMessageWindow()
def lambda_639():
label("loc_639")
TurnDirection(0xFE, 0x14E, 0)
OP_48()
Jump("loc_639")
QueueWorkItem2(0x10, 3, lambda_639)
OP_43(0x14E, 0x3, 0x0, 0x4)
Sleep(3000)
OP_62(0x10, 0x0, 2000, 0x18, 0x1B, 0xFA, 0x0)
Sleep(3000)
OP_63(0x10)
ChrTalk( #12
0xFE,
"I swear, kids these days...\x02",
)
CloseMessageWindow()
ChrTalk( #13
0xFE,
"They sure are a pain.\x02",
)
CloseMessageWindow()
OP_A2(0x2F44)
FadeToDark(2000, 0, -1)
OP_0D()
OP_44(0x10, 0x3)
NewScene("ED6_DT21/C2219 ._SN", 107, 0, 0)
IdleLoop()
label("loc_6C1")
Jump("loc_AE8")
label("loc_6C4")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x5E4, 0)), scpexpr(EXPR_END)), "loc_AE1")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x5E8, 3)), scpexpr(EXPR_END)), "loc_721")
ChrTalk( #14
0xFE,
"A happiness stone, you say?\x02",
)
CloseMessageWindow()
ChrTalk( #15
0xFE,
"You think somethin' like that exists?\x02",
)
CloseMessageWindow()
Jump("loc_ADE")
label("loc_721")
EventBegin(0x1)
OP_8C(0xFE, 270, 0)
Fade(1000)
OP_6D(-1600, 0, 202380, 0)
OP_67(0, 6000, -10000, 0)
OP_6B(3000, 0)
OP_6C(45000, 0)
OP_6E(280, 0)
SetChrPos(0x14E, -1250, 0, 202480, 270)
SetChrPos(0x14F, -1060, 0, 201620, 270)
Sleep(1000)
ChrTalk( #16
0xFE,
"I swear, kids these days...\x02",
)
CloseMessageWindow()
OP_62(0x10, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
Sleep(1000)
OP_8C(0xFE, 90, 500)
Sleep(500)
ChrTalk( #17
0xFE,
"Wh-What might you two be doing here?\x02",
)
CloseMessageWindow()
ChrTalk( #18
0x14E,
"#1718FHello!\x02",
)
CloseMessageWindow()
OP_62(0x14E, 0x0, 1600, 0x26, 0x27, 0xFA, 0x1)
Sleep(500)
OP_63(0x14E)
ChrTalk( #19
0x14E,
(
"#1714FActually, lighthouses are pretty high up,\x01",
"aren't they?\x02\x03",
"#1718FSir, you haven't seen a happiness stone before,\x01",
"have you?\x02",
)
)
CloseMessageWindow()
ChrTalk( #20
0xFE,
"A-A happiness stone?!\x02",
)
CloseMessageWindow()
ChrTalk( #21
0x14F,
"#1730FThey're really shiny and pretty!\x02",
)
CloseMessageWindow()
ChrTalk( #22
0xFE,
(
"N-No, I don't recall ever seein' any\x01",
"such thing in all my years...\x02",
)
)
CloseMessageWindow()
ChrTalk( #23
0x14E,
(
"#1716FOh... That's too bad...\x02\x03",
"#1710FWell, thank you, anyway.\x02",
)
)
CloseMessageWindow()
TurnDirection(0x14E, 0x14F, 400)
Sleep(400)
ChrTalk( #24
0x14E,
"#1718FLet's keep looking, Polly! \x02",
)
CloseMessageWindow()
OP_43(0x14E, 0x3, 0x0, 0x4)
Sleep(2000)
ChrTalk( #25
0x14F,
"#1731FI hope your back feels better, mister!\x02",
)
CloseMessageWindow()
OP_62(0x10, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
Sleep(1000)
def lambda_A1A():
label("loc_A1A")
TurnDirection(0xFE, 0x14F, 0)
OP_48()
Jump("loc_A1A")
QueueWorkItem2(0x10, 3, lambda_A1A)
OP_43(0x14F, 0x3, 0x0, 0x4)
Sleep(3000)
OP_62(0x10, 0x0, 2000, 0x18, 0x1B, 0xFA, 0x0)
Sleep(3000)
OP_63(0x10)
ChrTalk( #26
0xFE,
"I swear, kids these days...\x02",
)
CloseMessageWindow()
ChrTalk( #27
0xFE,
"...They're sharp little devils, aren't they?\x02",
)
CloseMessageWindow()
Sleep(500)
ChrTalk( #28
0xFE,
"A happiness stone, hmm...?\x02",
)
CloseMessageWindow()
OP_A2(0x2F43)
FadeToDark(2000, 0, -1)
OP_0D()
OP_44(0x10, 0x3)
NewScene("ED6_DT21/C2219 ._SN", 107, 0, 0)
IdleLoop()
label("loc_ADE")
Jump("loc_AE8")
label("loc_AE1")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x5E2, 7)), scpexpr(EXPR_END)), "loc_AE8")
label("loc_AE8")
TalkEnd(0xFE)
Return()
# Function_3_25A end
def Function_4_AEC(): pass
label("Function_4_AEC")
def lambda_AF2():
OP_8E(0xFE, 0xB04, 0x0, 0x32104, 0x7D0, 0x0)
ExitThread()
QueueWorkItem(0xFE, 1, lambda_AF2)
WaitChrThread(0xFE, 0x1)
def lambda_B12():
OP_8E(0xFE, 0xB04, 0x0, 0x3283E, 0x7D0, 0x0)
ExitThread()
QueueWorkItem(0xFE, 1, lambda_B12)
WaitChrThread(0xFE, 0x1)
def lambda_B32():
OP_8E(0xFE, 0xFFFFF254, 0xFFFFF830, 0x328F2, 0x7D0, 0x0)
ExitThread()
QueueWorkItem(0xFE, 1, lambda_B32)
WaitChrThread(0xFE, 0x1)
Return()
# Function_4_AEC end
def Function_5_B4D(): pass
label("Function_5_B4D")
TalkBegin(0xFF)
TalkEnd(0xFF)
Return()
# Function_5_B4D end
SaveToFile()
Try(main)
import torch.utils.data
import torch
import math
from util.helpers import *
from collections import defaultdict as ddict


class _Collate:
    def __init__(self):
        pass

    def collate(self, batch):
        return torch.squeeze(torch.from_numpy(np.array(batch)))


class PR:
    dataset = None
    eval_data = None
    model = None
    device = None
    most_frequent_rels = None

    test_data = None
    train_data = None
    valid_data = None
    eval_test_data = None

    topk = None

    def init(self, data):
        self.model = self.model.to(self.device)
        collate_fn = _Collate()
        self.eval_loader = torch.utils.data.DataLoader(
            data,
            Config.eval_batch_size, shuffle=False,
            pin_memory=Config.pin_memory, num_workers=Config.loader_num_workers,
            collate_fn=collate_fn.collate)

    def count_e1_e2_by_relation(self, data):
        rel_map = ddict(int)
        for r in data.keys():
            rel_map[r] = len(data[r])
        count_pairs_by_relation = rel_map.items()
        count_pairs_by_relation = sorted(count_pairs_by_relation, key=lambda x: -x[1])
        return count_pairs_by_relation

    # computes the position of a tuple for the flattened 1d score matrix
    def convert_idx_to_1d(self, tuples_r, n=None):
        if n is None:
            n = self.model.num_entities
        pos_1d = []
        row_idx, column_idx = tuples_r
        for i in range(len(row_idx)):
            pos_1d.append(row_idx[i] * n + column_idx[i])
        return pos_1d
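    # Worked example (illustrative numbers, not from the original data): with
    # n = 4 entities, the pair (row_idx=2, column_idx=3) maps to flat index
    # 2 * 4 + 3 = 11 in the row-major flattening of the n x n score matrix.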
    def evaluate(self, epoch, logger):
        # prepare data
        idx_train = ddict(list)
        for e1, r, e2 in self.train_data:
            idx_train[r].append((e1, e2))

        if self.eval_test_data:
            idx_valid = ddict(list)
            for e1, r, e2 in self.valid_data:
                idx_valid[r].append((e1, e2))

        idx_test = ddict(list)
        for e1, r, e2 in self.test_data:
            idx_test[r].append((e1, e2))

        tuples_by_relation = self.count_e1_e2_by_relation(idx_test)

        relations = np.array([x[0] for x in tuples_by_relation])
        # tuples_count = np.array([x[1] for x in tuples_by_relation])

        # speedup grid search
        if self.most_frequent_rels > 0:
            print("Evaluating on {} most frequent relations...".format(self.most_frequent_rels))
            relations = relations[:self.most_frequent_rels]

        prepare_test = ddict(list)
        for e1, r, e2 in self.test_data:
            prepare_test[r].append([e1, r, e2])

        # sorted data
        prepare_test_sorted = ddict(list)
        for r in relations:
            prepare_test_sorted[r].append(prepare_test[r])

        eval_data_prepared = [triple_list for r, triple_list in prepare_test_sorted.items()]

        ranks_by_r = ddict(list)
        num_true_triples = ddict(list)

        self.init(eval_data_prepared)
        for i, batch in enumerate(self.eval_loader):
            batch = batch.to(self.device)
            r = None
            if len(batch.shape) >= 2:
                r_tensor = batch[0][1]
                r = batch[0][1].item()
            else:
                # only one test triple for a given relation
                r_tensor = batch[1]
                r = batch[1].item()
            print("Evaluating: {} Progress: {}%".format(r, round(i / len(self.eval_loader) * 100, 2)))
            scores = ddict(list)

            score_matrix = self.model.score_matrix_r(r_tensor)
            scores[r].append(score_matrix)

            # ----- FILTERING -----
            # all e1, e2 for a given relation in test, validation data
            tuples_r_test = np.array(prepare_test_sorted[r][0])
            tuples_r_test = [tuples_r_test[:, 0], tuples_r_test[:, 2]]

            tuples_r_train = np.array(idx_train[r])
            tuples_r_train = [tuples_r_train[:, 0], tuples_r_train[:, 1]]

            score_matrix[tuples_r_train] = -math.inf  # Filter training set out

            # Filter validation set out
            if self.eval_test_data:
                tuples_r_valid = np.array(idx_valid[r])
                if len(tuples_r_valid) > 0:
                    tuples_r_valid = [tuples_r_valid[:, 0], tuples_r_valid[:, 1]]
                    score_matrix[tuples_r_valid] = -math.inf

            # ---- /FILTERING -----
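            # Filtering sketch (illustrative pairs, not from the original data):
            # if relation r has the training pair (0, 2) and the held-out test
            # pair (0, 3), then score_matrix[0, 2] has just been set to -inf,
            # so the already-known training fact cannot outrank the test fact.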
            test_tuples_r_1d = self.convert_idx_to_1d(tuples_r_test)
            num_true_triples[r] = len(test_tuples_r_1d)
            test_tuples_r_1d_tensor = torch.squeeze(torch.LongTensor([test_tuples_r_1d]))
            topk = self.compute_topk(score_matrix, test_tuples_r_1d_tensor)
            ranks = topk.cpu().data.numpy()
            if len(ranks.shape) > 0:
                ranks = np.sort(ranks)
            print(ranks)
            ranks_by_r[r].append(ranks)

        print("-----------------------")
        avg_map, avg_hits = self.metrics(ranks_by_r, num_true_triples)

        print("TOTAL MAP: {} ".format(avg_map))
        print("TOTAL HITS: {}".format(avg_hits))

        # save results
        if logger is not None:
            avg_map = round(avg_map, 4)
            avg_hits = round(avg_hits, 4)
            logger.log_result(avg_map, avg_hits, epoch, "a")
            logger.compare_best(avg_map, avg_hits, epoch, "_best", self.model)

        return avg_map, avg_hits
    def compute_topk(self, score_matrix, tuples_r_1d):
        score_matrix = score_matrix.reshape((1, -1)).flatten()

        if len(score_matrix) > self.topk + 1:
            sorted_k_values, sorted_k_indexs = torch.topk(score_matrix, self.topk, largest=True, sorted=True)

        other = torch.zeros(len(sorted_k_indexs)).long().to(self.device)

        tuples_r_1d = tuples_r_1d.to(self.device)

        if len(tuples_r_1d.size()) > 0:
            check = [torch.where(sorted_k_indexs == t, sorted_k_indexs, other) for t in tuples_r_1d if len(torch.nonzero(sorted_k_indexs == t)) > 0]
        else:
            check = [torch.where(sorted_k_indexs == tuples_r_1d, sorted_k_indexs, other)]

        ranks = [torch.nonzero(t) + 1 for t in check]
        if len(ranks) == 1:  # one or zero elements in ranks
            ranks = ranks[0] if len(ranks[0].size()) <= 1 else ranks[0][0]
        else:
            ranks = torch.LongTensor(ranks).to(self.device)

        return ranks

    def metrics(self, ranks_by_relation, num_true_triples):
        total_precision = 0
        normalization = 0
        total_hits = 0
        for r, ranks in ranks_by_relation.items():
            total_hits += len(ranks[0])
            normalization += min(num_true_triples[r], self.topk)
            for idx, rank in enumerate(ranks[0]):
                total_precision += (idx + 1) / rank

        avg_map = (total_precision / normalization) * 100
        avg_hits = (total_hits / normalization) * 100
        return avg_map, avg_hits
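    # Illustrative check of the metric (assumed numbers): a relation with two
    # true test triples ranked at positions 1 and 3 contributes hits = 2,
    # normalization = min(2, topk) = 2 (for topk >= 2), and precision terms
    # 1/1 + 2/3, so this relation alone yields MAP = ((1 + 2/3) / 2) * 100,
    # roughly 83.3, and HITS = (2 / 2) * 100 = 100.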
    @staticmethod
    def fromConfig(model, dataset):
        evaluator = PR()
        if dataset is None:
            evaluator.dataset = dataset.load()
        else:
            evaluator.dataset = dataset

        evaluator.device = torch.device(Config.eval_device)

        torch.set_num_threads(Config.num_threads)
        evaluator.model = model

        coder = Coder()
        data_dir = Config.data_dir
        dataset = Config.dataset
        train_triples = read_triplets(data_dir + Config.dataset + "/" + Config.raw_split_files['train'], None)
        train_triples = coder.construct_encoder(train_triples)

        test_triples = read_triplets(data_dir + dataset + "/" + Config.raw_split_files['test'], coder)
        test_triples = coder.construct_encoder(test_triples)

        valid_triples = read_triplets(data_dir + dataset + "/" + Config.raw_split_files['valid'], coder)
        valid_triples = coder.construct_encoder(valid_triples)

        evaluator.train_data = train_triples
        evaluator.eval_test_data = Config.eval_test_data

        if Config.eval_test_data:  # use test set for evaluation, training and validation split for filtering
            evaluator.test_data = test_triples
            evaluator.valid_data = valid_triples
        else:  # use validation set for evaluation and training set for filtering
            evaluator.test_data = valid_triples

        evaluator.most_frequent_rels = Config.most_frequent_rels
        evaluator.topk = Config.topk

        return evaluator
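
# Minimal usage sketch (hypothetical; `model` and `dataset` are placeholder
# objects assumed to match Config and util.helpers, not part of the original file):
#   evaluator = PR.fromConfig(model, dataset)
#   avg_map, avg_hits = evaluator.evaluate(epoch=0, logger=None)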
|
normal
|
{
"blob_id": "606a6e7ecc58ecbb11aa53602599e671514bc537",
"index": 3890,
"step-1": "<mask token>\n\n\nclass PR:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def init(self, data):\n self.model = self.model.to(self.device)\n collate_fn = _Collate()\n self.eval_loader = torch.utils.data.DataLoader(data, Config.\n eval_batch_size, shuffle=False, pin_memory=Config.pin_memory,\n num_workers=Config.loader_num_workers, collate_fn=collate_fn.\n collate)\n\n def count_e1_e2_by_relation(self, data):\n rel_map = ddict(int)\n for r in data.keys():\n rel_map[r] = len(data[r])\n count_pairs_by_relation = rel_map.items()\n count_pairs_by_relation = sorted(count_pairs_by_relation, key=lambda\n x: -x[1])\n return count_pairs_by_relation\n\n def convert_idx_to_1d(self, tuples_r, n=None):\n if n is None:\n n = self.model.num_entities\n pos_1d = []\n row_idx, column_idx = tuples_r\n for i in range(len(row_idx)):\n pos_1d.append(row_idx[i] * n + column_idx[i])\n return pos_1d\n\n def evaluate(self, epoch, logger):\n idx_train = ddict(list)\n for e1, r, e2 in self.train_data:\n idx_train[r].append((e1, e2))\n if self.eval_test_data:\n idx_valid = ddict(list)\n for e1, r, e2 in self.valid_data:\n idx_valid[r].append((e1, e2))\n idx_test = ddict(list)\n for e1, r, e2 in self.test_data:\n idx_test[r].append((e1, e2))\n tuples_by_relation = self.count_e1_e2_by_relation(idx_test)\n relations = np.array([x[0] for x in tuples_by_relation])\n if self.most_frequent_rels > 0:\n print('Evaluating on {} most frequent relations...'.format(self\n .most_frequent_rels))\n relations = relations[:self.most_frequent_rels]\n prepare_test = ddict(list)\n for e1, r, e2 in self.test_data:\n prepare_test[r].append([e1, r, e2])\n prepare_test_sorted = ddict(list)\n for r in relations:\n prepare_test_sorted[r].append(prepare_test[r])\n eval_data_prepared = [triple_list for r, triple_list in\n prepare_test_sorted.items()]\n ranks_by_r = ddict(list)\n num_true_triples = ddict(list)\n self.init(eval_data_prepared)\n for i, batch in enumerate(self.eval_loader):\n batch = batch.to(self.device)\n r = None\n if len(batch.shape) >= 2:\n r_tensor = batch[0][1]\n r = batch[0][1].item()\n else:\n r_tensor = batch[1]\n r = batch[1].item()\n print('Evaluating: {} Progress: {}%'.format(r, round(i / len(\n self.eval_loader) * 100, 2)))\n scores = ddict(list)\n score_matrix = self.model.score_matrix_r(r_tensor)\n scores[r].append(score_matrix)\n tuples_r_test = np.array(prepare_test_sorted[r][0])\n tuples_r_test = [tuples_r_test[:, 0], tuples_r_test[:, 2]]\n tuples_r_train = np.array(idx_train[r])\n tuples_r_train = [tuples_r_train[:, 0], tuples_r_train[:, 1]]\n score_matrix[tuples_r_train] = -math.inf\n if self.eval_test_data:\n tuples_r_valid = np.array(idx_valid[r])\n if len(tuples_r_valid) > 0:\n tuples_r_valid = [tuples_r_valid[:, 0], tuples_r_valid[\n :, 1]]\n score_matrix[tuples_r_valid] = -math.inf\n test_tuples_r_1d = self.convert_idx_to_1d(tuples_r_test)\n num_true_triples[r] = len(test_tuples_r_1d)\n test_tuples_r_1d_tensor = torch.squeeze(torch.LongTensor([\n test_tuples_r_1d]))\n topk = self.compute_topk(score_matrix, test_tuples_r_1d_tensor)\n ranks = topk.cpu().data.numpy()\n if len(ranks.shape) > 0:\n ranks = np.sort(ranks)\n print(ranks)\n ranks_by_r[r].append(ranks)\n print('-----------------------')\n avg_map, avg_hits = self.metrics(ranks_by_r, num_true_triples)\n print('TOTAL MAP: {} '.format(avg_map))\n print('TOTAL HITS: {}'.format(avg_hits))\n if logger is not None:\n avg_map = round(avg_map, 
4)\n avg_hits = round(avg_hits, 4)\n logger.log_result(avg_map, avg_hits, epoch, 'a')\n logger.compare_best(avg_map, avg_hits, epoch, '_best', self.model)\n return avg_map, avg_hits\n <mask token>\n\n def metrics(self, ranks_by_relation, num_true_triples):\n total_precision = 0\n normalization = 0\n total_hits = 0\n for r, ranks in ranks_by_relation.items():\n total_hits += len(ranks[0])\n normalization += min(num_true_triples[r], self.topk)\n for idx, rank in enumerate(ranks[0]):\n total_precision += (idx + 1) / rank\n avg_map = total_precision / normalization * 100\n avg_hits = total_hits / normalization * 100\n return avg_map, avg_hits\n\n @staticmethod\n def fromConfig(model, dataset):\n evaluator = PR()\n if dataset is None:\n evaluator.dataset = dataset.load()\n else:\n evaluator.dataset = dataset\n evaluator.device = torch.device(Config.eval_device)\n torch.set_num_threads(Config.num_threads)\n evaluator.model = model\n coder = Coder()\n data_dir = Config.data_dir\n dataset = Config.dataset\n train_triples = read_triplets(data_dir + Config.dataset + '/' +\n Config.raw_split_files['train'], None)\n train_triples = coder.construct_encoder(train_triples)\n test_triples = read_triplets(data_dir + dataset + '/' + Config.\n raw_split_files['test'], coder)\n test_triples = coder.construct_encoder(test_triples)\n valid_triples = read_triplets(data_dir + dataset + '/' + Config.\n raw_split_files['valid'], coder)\n valid_triples = coder.construct_encoder(valid_triples)\n evaluator.train_data = train_triples\n evaluator.eval_test_data = Config.eval_test_data\n if Config.eval_test_data:\n evaluator.test_data = test_triples\n evaluator.valid_data = valid_triples\n else:\n evaluator.test_data = valid_triples\n evaluator.most_frequent_rels = Config.most_frequent_rels\n evaluator.topk = Config.topk\n return evaluator\n",
"step-2": "<mask token>\n\n\nclass PR:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def init(self, data):\n self.model = self.model.to(self.device)\n collate_fn = _Collate()\n self.eval_loader = torch.utils.data.DataLoader(data, Config.\n eval_batch_size, shuffle=False, pin_memory=Config.pin_memory,\n num_workers=Config.loader_num_workers, collate_fn=collate_fn.\n collate)\n\n def count_e1_e2_by_relation(self, data):\n rel_map = ddict(int)\n for r in data.keys():\n rel_map[r] = len(data[r])\n count_pairs_by_relation = rel_map.items()\n count_pairs_by_relation = sorted(count_pairs_by_relation, key=lambda\n x: -x[1])\n return count_pairs_by_relation\n\n def convert_idx_to_1d(self, tuples_r, n=None):\n if n is None:\n n = self.model.num_entities\n pos_1d = []\n row_idx, column_idx = tuples_r\n for i in range(len(row_idx)):\n pos_1d.append(row_idx[i] * n + column_idx[i])\n return pos_1d\n\n def evaluate(self, epoch, logger):\n idx_train = ddict(list)\n for e1, r, e2 in self.train_data:\n idx_train[r].append((e1, e2))\n if self.eval_test_data:\n idx_valid = ddict(list)\n for e1, r, e2 in self.valid_data:\n idx_valid[r].append((e1, e2))\n idx_test = ddict(list)\n for e1, r, e2 in self.test_data:\n idx_test[r].append((e1, e2))\n tuples_by_relation = self.count_e1_e2_by_relation(idx_test)\n relations = np.array([x[0] for x in tuples_by_relation])\n if self.most_frequent_rels > 0:\n print('Evaluating on {} most frequent relations...'.format(self\n .most_frequent_rels))\n relations = relations[:self.most_frequent_rels]\n prepare_test = ddict(list)\n for e1, r, e2 in self.test_data:\n prepare_test[r].append([e1, r, e2])\n prepare_test_sorted = ddict(list)\n for r in relations:\n prepare_test_sorted[r].append(prepare_test[r])\n eval_data_prepared = [triple_list for r, triple_list in\n prepare_test_sorted.items()]\n ranks_by_r = ddict(list)\n num_true_triples = ddict(list)\n self.init(eval_data_prepared)\n for i, batch in enumerate(self.eval_loader):\n batch = batch.to(self.device)\n r = None\n if len(batch.shape) >= 2:\n r_tensor = batch[0][1]\n r = batch[0][1].item()\n else:\n r_tensor = batch[1]\n r = batch[1].item()\n print('Evaluating: {} Progress: {}%'.format(r, round(i / len(\n self.eval_loader) * 100, 2)))\n scores = ddict(list)\n score_matrix = self.model.score_matrix_r(r_tensor)\n scores[r].append(score_matrix)\n tuples_r_test = np.array(prepare_test_sorted[r][0])\n tuples_r_test = [tuples_r_test[:, 0], tuples_r_test[:, 2]]\n tuples_r_train = np.array(idx_train[r])\n tuples_r_train = [tuples_r_train[:, 0], tuples_r_train[:, 1]]\n score_matrix[tuples_r_train] = -math.inf\n if self.eval_test_data:\n tuples_r_valid = np.array(idx_valid[r])\n if len(tuples_r_valid) > 0:\n tuples_r_valid = [tuples_r_valid[:, 0], tuples_r_valid[\n :, 1]]\n score_matrix[tuples_r_valid] = -math.inf\n test_tuples_r_1d = self.convert_idx_to_1d(tuples_r_test)\n num_true_triples[r] = len(test_tuples_r_1d)\n test_tuples_r_1d_tensor = torch.squeeze(torch.LongTensor([\n test_tuples_r_1d]))\n topk = self.compute_topk(score_matrix, test_tuples_r_1d_tensor)\n ranks = topk.cpu().data.numpy()\n if len(ranks.shape) > 0:\n ranks = np.sort(ranks)\n print(ranks)\n ranks_by_r[r].append(ranks)\n print('-----------------------')\n avg_map, avg_hits = self.metrics(ranks_by_r, num_true_triples)\n print('TOTAL MAP: {} '.format(avg_map))\n print('TOTAL HITS: {}'.format(avg_hits))\n if logger is not None:\n avg_map = round(avg_map, 
4)\n avg_hits = round(avg_hits, 4)\n logger.log_result(avg_map, avg_hits, epoch, 'a')\n logger.compare_best(avg_map, avg_hits, epoch, '_best', self.model)\n return avg_map, avg_hits\n\n def compute_topk(self, score_matrix, tuples_r_1d):\n score_matrix = score_matrix.reshape((1, -1)).flatten()\n if len(score_matrix) > self.topk + 1:\n sorted_k_values, sorted_k_indexs = torch.topk(score_matrix,\n self.topk, largest=True, sorted=True)\n other = torch.zeros(len(sorted_k_indexs)).long().to(self.device)\n tuples_r_1d = tuples_r_1d.to(self.device)\n if len(tuples_r_1d.size()) > 0:\n check = [torch.where(sorted_k_indexs == t, sorted_k_indexs,\n other) for t in tuples_r_1d if len(torch.nonzero(\n sorted_k_indexs == t)) > 0]\n else:\n check = [torch.where(sorted_k_indexs == tuples_r_1d,\n sorted_k_indexs, other)]\n ranks = [(torch.nonzero(t) + 1) for t in check]\n if len(ranks) == 1:\n ranks = ranks[0] if len(ranks[0].size()) <= 1 else ranks[0][0]\n else:\n ranks = torch.LongTensor(ranks).to(self.device)\n return ranks\n\n def metrics(self, ranks_by_relation, num_true_triples):\n total_precision = 0\n normalization = 0\n total_hits = 0\n for r, ranks in ranks_by_relation.items():\n total_hits += len(ranks[0])\n normalization += min(num_true_triples[r], self.topk)\n for idx, rank in enumerate(ranks[0]):\n total_precision += (idx + 1) / rank\n avg_map = total_precision / normalization * 100\n avg_hits = total_hits / normalization * 100\n return avg_map, avg_hits\n\n @staticmethod\n def fromConfig(model, dataset):\n evaluator = PR()\n if dataset is None:\n evaluator.dataset = dataset.load()\n else:\n evaluator.dataset = dataset\n evaluator.device = torch.device(Config.eval_device)\n torch.set_num_threads(Config.num_threads)\n evaluator.model = model\n coder = Coder()\n data_dir = Config.data_dir\n dataset = Config.dataset\n train_triples = read_triplets(data_dir + Config.dataset + '/' +\n Config.raw_split_files['train'], None)\n train_triples = coder.construct_encoder(train_triples)\n test_triples = read_triplets(data_dir + dataset + '/' + Config.\n raw_split_files['test'], coder)\n test_triples = coder.construct_encoder(test_triples)\n valid_triples = read_triplets(data_dir + dataset + '/' + Config.\n raw_split_files['valid'], coder)\n valid_triples = coder.construct_encoder(valid_triples)\n evaluator.train_data = train_triples\n evaluator.eval_test_data = Config.eval_test_data\n if Config.eval_test_data:\n evaluator.test_data = test_triples\n evaluator.valid_data = valid_triples\n else:\n evaluator.test_data = valid_triples\n evaluator.most_frequent_rels = Config.most_frequent_rels\n evaluator.topk = Config.topk\n return evaluator\n",
"step-3": "<mask token>\n\n\nclass PR:\n dataset = None\n eval_data = None\n model = None\n device = None\n most_frequent_rels = None\n test_data = None\n train_data = None\n valid_data = None\n eval_test_data = None\n topk = None\n\n def init(self, data):\n self.model = self.model.to(self.device)\n collate_fn = _Collate()\n self.eval_loader = torch.utils.data.DataLoader(data, Config.\n eval_batch_size, shuffle=False, pin_memory=Config.pin_memory,\n num_workers=Config.loader_num_workers, collate_fn=collate_fn.\n collate)\n\n def count_e1_e2_by_relation(self, data):\n rel_map = ddict(int)\n for r in data.keys():\n rel_map[r] = len(data[r])\n count_pairs_by_relation = rel_map.items()\n count_pairs_by_relation = sorted(count_pairs_by_relation, key=lambda\n x: -x[1])\n return count_pairs_by_relation\n\n def convert_idx_to_1d(self, tuples_r, n=None):\n if n is None:\n n = self.model.num_entities\n pos_1d = []\n row_idx, column_idx = tuples_r\n for i in range(len(row_idx)):\n pos_1d.append(row_idx[i] * n + column_idx[i])\n return pos_1d\n\n def evaluate(self, epoch, logger):\n idx_train = ddict(list)\n for e1, r, e2 in self.train_data:\n idx_train[r].append((e1, e2))\n if self.eval_test_data:\n idx_valid = ddict(list)\n for e1, r, e2 in self.valid_data:\n idx_valid[r].append((e1, e2))\n idx_test = ddict(list)\n for e1, r, e2 in self.test_data:\n idx_test[r].append((e1, e2))\n tuples_by_relation = self.count_e1_e2_by_relation(idx_test)\n relations = np.array([x[0] for x in tuples_by_relation])\n if self.most_frequent_rels > 0:\n print('Evaluating on {} most frequent relations...'.format(self\n .most_frequent_rels))\n relations = relations[:self.most_frequent_rels]\n prepare_test = ddict(list)\n for e1, r, e2 in self.test_data:\n prepare_test[r].append([e1, r, e2])\n prepare_test_sorted = ddict(list)\n for r in relations:\n prepare_test_sorted[r].append(prepare_test[r])\n eval_data_prepared = [triple_list for r, triple_list in\n prepare_test_sorted.items()]\n ranks_by_r = ddict(list)\n num_true_triples = ddict(list)\n self.init(eval_data_prepared)\n for i, batch in enumerate(self.eval_loader):\n batch = batch.to(self.device)\n r = None\n if len(batch.shape) >= 2:\n r_tensor = batch[0][1]\n r = batch[0][1].item()\n else:\n r_tensor = batch[1]\n r = batch[1].item()\n print('Evaluating: {} Progress: {}%'.format(r, round(i / len(\n self.eval_loader) * 100, 2)))\n scores = ddict(list)\n score_matrix = self.model.score_matrix_r(r_tensor)\n scores[r].append(score_matrix)\n tuples_r_test = np.array(prepare_test_sorted[r][0])\n tuples_r_test = [tuples_r_test[:, 0], tuples_r_test[:, 2]]\n tuples_r_train = np.array(idx_train[r])\n tuples_r_train = [tuples_r_train[:, 0], tuples_r_train[:, 1]]\n score_matrix[tuples_r_train] = -math.inf\n if self.eval_test_data:\n tuples_r_valid = np.array(idx_valid[r])\n if len(tuples_r_valid) > 0:\n tuples_r_valid = [tuples_r_valid[:, 0], tuples_r_valid[\n :, 1]]\n score_matrix[tuples_r_valid] = -math.inf\n test_tuples_r_1d = self.convert_idx_to_1d(tuples_r_test)\n num_true_triples[r] = len(test_tuples_r_1d)\n test_tuples_r_1d_tensor = torch.squeeze(torch.LongTensor([\n test_tuples_r_1d]))\n topk = self.compute_topk(score_matrix, test_tuples_r_1d_tensor)\n ranks = topk.cpu().data.numpy()\n if len(ranks.shape) > 0:\n ranks = np.sort(ranks)\n print(ranks)\n ranks_by_r[r].append(ranks)\n print('-----------------------')\n avg_map, avg_hits = self.metrics(ranks_by_r, num_true_triples)\n print('TOTAL MAP: {} '.format(avg_map))\n print('TOTAL HITS: {}'.format(avg_hits))\n if logger 
is not None:\n avg_map = round(avg_map, 4)\n avg_hits = round(avg_hits, 4)\n logger.log_result(avg_map, avg_hits, epoch, 'a')\n logger.compare_best(avg_map, avg_hits, epoch, '_best', self.model)\n return avg_map, avg_hits\n\n def compute_topk(self, score_matrix, tuples_r_1d):\n score_matrix = score_matrix.reshape((1, -1)).flatten()\n if len(score_matrix) > self.topk + 1:\n sorted_k_values, sorted_k_indexs = torch.topk(score_matrix,\n self.topk, largest=True, sorted=True)\n other = torch.zeros(len(sorted_k_indexs)).long().to(self.device)\n tuples_r_1d = tuples_r_1d.to(self.device)\n if len(tuples_r_1d.size()) > 0:\n check = [torch.where(sorted_k_indexs == t, sorted_k_indexs,\n other) for t in tuples_r_1d if len(torch.nonzero(\n sorted_k_indexs == t)) > 0]\n else:\n check = [torch.where(sorted_k_indexs == tuples_r_1d,\n sorted_k_indexs, other)]\n ranks = [(torch.nonzero(t) + 1) for t in check]\n if len(ranks) == 1:\n ranks = ranks[0] if len(ranks[0].size()) <= 1 else ranks[0][0]\n else:\n ranks = torch.LongTensor(ranks).to(self.device)\n return ranks\n\n def metrics(self, ranks_by_relation, num_true_triples):\n total_precision = 0\n normalization = 0\n total_hits = 0\n for r, ranks in ranks_by_relation.items():\n total_hits += len(ranks[0])\n normalization += min(num_true_triples[r], self.topk)\n for idx, rank in enumerate(ranks[0]):\n total_precision += (idx + 1) / rank\n avg_map = total_precision / normalization * 100\n avg_hits = total_hits / normalization * 100\n return avg_map, avg_hits\n\n @staticmethod\n def fromConfig(model, dataset):\n evaluator = PR()\n if dataset is None:\n evaluator.dataset = dataset.load()\n else:\n evaluator.dataset = dataset\n evaluator.device = torch.device(Config.eval_device)\n torch.set_num_threads(Config.num_threads)\n evaluator.model = model\n coder = Coder()\n data_dir = Config.data_dir\n dataset = Config.dataset\n train_triples = read_triplets(data_dir + Config.dataset + '/' +\n Config.raw_split_files['train'], None)\n train_triples = coder.construct_encoder(train_triples)\n test_triples = read_triplets(data_dir + dataset + '/' + Config.\n raw_split_files['test'], coder)\n test_triples = coder.construct_encoder(test_triples)\n valid_triples = read_triplets(data_dir + dataset + '/' + Config.\n raw_split_files['valid'], coder)\n valid_triples = coder.construct_encoder(valid_triples)\n evaluator.train_data = train_triples\n evaluator.eval_test_data = Config.eval_test_data\n if Config.eval_test_data:\n evaluator.test_data = test_triples\n evaluator.valid_data = valid_triples\n else:\n evaluator.test_data = valid_triples\n evaluator.most_frequent_rels = Config.most_frequent_rels\n evaluator.topk = Config.topk\n return evaluator\n",
"step-4": "<mask token>\n\n\nclass _Collate:\n <mask token>\n\n def collate(self, batch):\n return torch.squeeze(torch.from_numpy(np.array(batch)))\n\n\nclass PR:\n dataset = None\n eval_data = None\n model = None\n device = None\n most_frequent_rels = None\n test_data = None\n train_data = None\n valid_data = None\n eval_test_data = None\n topk = None\n\n def init(self, data):\n self.model = self.model.to(self.device)\n collate_fn = _Collate()\n self.eval_loader = torch.utils.data.DataLoader(data, Config.\n eval_batch_size, shuffle=False, pin_memory=Config.pin_memory,\n num_workers=Config.loader_num_workers, collate_fn=collate_fn.\n collate)\n\n def count_e1_e2_by_relation(self, data):\n rel_map = ddict(int)\n for r in data.keys():\n rel_map[r] = len(data[r])\n count_pairs_by_relation = rel_map.items()\n count_pairs_by_relation = sorted(count_pairs_by_relation, key=lambda\n x: -x[1])\n return count_pairs_by_relation\n\n def convert_idx_to_1d(self, tuples_r, n=None):\n if n is None:\n n = self.model.num_entities\n pos_1d = []\n row_idx, column_idx = tuples_r\n for i in range(len(row_idx)):\n pos_1d.append(row_idx[i] * n + column_idx[i])\n return pos_1d\n\n def evaluate(self, epoch, logger):\n idx_train = ddict(list)\n for e1, r, e2 in self.train_data:\n idx_train[r].append((e1, e2))\n if self.eval_test_data:\n idx_valid = ddict(list)\n for e1, r, e2 in self.valid_data:\n idx_valid[r].append((e1, e2))\n idx_test = ddict(list)\n for e1, r, e2 in self.test_data:\n idx_test[r].append((e1, e2))\n tuples_by_relation = self.count_e1_e2_by_relation(idx_test)\n relations = np.array([x[0] for x in tuples_by_relation])\n if self.most_frequent_rels > 0:\n print('Evaluating on {} most frequent relations...'.format(self\n .most_frequent_rels))\n relations = relations[:self.most_frequent_rels]\n prepare_test = ddict(list)\n for e1, r, e2 in self.test_data:\n prepare_test[r].append([e1, r, e2])\n prepare_test_sorted = ddict(list)\n for r in relations:\n prepare_test_sorted[r].append(prepare_test[r])\n eval_data_prepared = [triple_list for r, triple_list in\n prepare_test_sorted.items()]\n ranks_by_r = ddict(list)\n num_true_triples = ddict(list)\n self.init(eval_data_prepared)\n for i, batch in enumerate(self.eval_loader):\n batch = batch.to(self.device)\n r = None\n if len(batch.shape) >= 2:\n r_tensor = batch[0][1]\n r = batch[0][1].item()\n else:\n r_tensor = batch[1]\n r = batch[1].item()\n print('Evaluating: {} Progress: {}%'.format(r, round(i / len(\n self.eval_loader) * 100, 2)))\n scores = ddict(list)\n score_matrix = self.model.score_matrix_r(r_tensor)\n scores[r].append(score_matrix)\n tuples_r_test = np.array(prepare_test_sorted[r][0])\n tuples_r_test = [tuples_r_test[:, 0], tuples_r_test[:, 2]]\n tuples_r_train = np.array(idx_train[r])\n tuples_r_train = [tuples_r_train[:, 0], tuples_r_train[:, 1]]\n score_matrix[tuples_r_train] = -math.inf\n if self.eval_test_data:\n tuples_r_valid = np.array(idx_valid[r])\n if len(tuples_r_valid) > 0:\n tuples_r_valid = [tuples_r_valid[:, 0], tuples_r_valid[\n :, 1]]\n score_matrix[tuples_r_valid] = -math.inf\n test_tuples_r_1d = self.convert_idx_to_1d(tuples_r_test)\n num_true_triples[r] = len(test_tuples_r_1d)\n test_tuples_r_1d_tensor = torch.squeeze(torch.LongTensor([\n test_tuples_r_1d]))\n topk = self.compute_topk(score_matrix, test_tuples_r_1d_tensor)\n ranks = topk.cpu().data.numpy()\n if len(ranks.shape) > 0:\n ranks = np.sort(ranks)\n print(ranks)\n ranks_by_r[r].append(ranks)\n print('-----------------------')\n avg_map, avg_hits = 
self.metrics(ranks_by_r, num_true_triples)\n print('TOTAL MAP: {} '.format(avg_map))\n print('TOTAL HITS: {}'.format(avg_hits))\n if logger is not None:\n avg_map = round(avg_map, 4)\n avg_hits = round(avg_hits, 4)\n logger.log_result(avg_map, avg_hits, epoch, 'a')\n logger.compare_best(avg_map, avg_hits, epoch, '_best', self.model)\n return avg_map, avg_hits\n\n def compute_topk(self, score_matrix, tuples_r_1d):\n score_matrix = score_matrix.reshape((1, -1)).flatten()\n if len(score_matrix) > self.topk + 1:\n sorted_k_values, sorted_k_indexs = torch.topk(score_matrix,\n self.topk, largest=True, sorted=True)\n other = torch.zeros(len(sorted_k_indexs)).long().to(self.device)\n tuples_r_1d = tuples_r_1d.to(self.device)\n if len(tuples_r_1d.size()) > 0:\n check = [torch.where(sorted_k_indexs == t, sorted_k_indexs,\n other) for t in tuples_r_1d if len(torch.nonzero(\n sorted_k_indexs == t)) > 0]\n else:\n check = [torch.where(sorted_k_indexs == tuples_r_1d,\n sorted_k_indexs, other)]\n ranks = [(torch.nonzero(t) + 1) for t in check]\n if len(ranks) == 1:\n ranks = ranks[0] if len(ranks[0].size()) <= 1 else ranks[0][0]\n else:\n ranks = torch.LongTensor(ranks).to(self.device)\n return ranks\n\n def metrics(self, ranks_by_relation, num_true_triples):\n total_precision = 0\n normalization = 0\n total_hits = 0\n for r, ranks in ranks_by_relation.items():\n total_hits += len(ranks[0])\n normalization += min(num_true_triples[r], self.topk)\n for idx, rank in enumerate(ranks[0]):\n total_precision += (idx + 1) / rank\n avg_map = total_precision / normalization * 100\n avg_hits = total_hits / normalization * 100\n return avg_map, avg_hits\n\n @staticmethod\n def fromConfig(model, dataset):\n evaluator = PR()\n if dataset is None:\n evaluator.dataset = dataset.load()\n else:\n evaluator.dataset = dataset\n evaluator.device = torch.device(Config.eval_device)\n torch.set_num_threads(Config.num_threads)\n evaluator.model = model\n coder = Coder()\n data_dir = Config.data_dir\n dataset = Config.dataset\n train_triples = read_triplets(data_dir + Config.dataset + '/' +\n Config.raw_split_files['train'], None)\n train_triples = coder.construct_encoder(train_triples)\n test_triples = read_triplets(data_dir + dataset + '/' + Config.\n raw_split_files['test'], coder)\n test_triples = coder.construct_encoder(test_triples)\n valid_triples = read_triplets(data_dir + dataset + '/' + Config.\n raw_split_files['valid'], coder)\n valid_triples = coder.construct_encoder(valid_triples)\n evaluator.train_data = train_triples\n evaluator.eval_test_data = Config.eval_test_data\n if Config.eval_test_data:\n evaluator.test_data = test_triples\n evaluator.valid_data = valid_triples\n else:\n evaluator.test_data = valid_triples\n evaluator.most_frequent_rels = Config.most_frequent_rels\n evaluator.topk = Config.topk\n return evaluator\n",
"step-5": "import torch.utils.data\nimport torch\nimport math\nfrom util.helpers import *\nfrom collections import defaultdict as ddict\n\nclass _Collate:\n def __init__(self, ):\n pass\n\n def collate(self, batch):\n return torch.squeeze(torch.from_numpy(np.array(batch)))\n\n\nclass PR:\n dataset = None\n eval_data = None\n model = None\n device = None\n most_frequent_rels = None\n\n test_data = None\n train_data = None\n valid_data = None\n eval_test_data = None\n\n topk = None\n\n def init(self, data):\n self.model = self.model.to(self.device)\n collate_fn = _Collate()\n self.eval_loader = torch.utils.data.DataLoader(\n data,\n Config.eval_batch_size, shuffle=False,\n pin_memory=Config.pin_memory, num_workers=Config.loader_num_workers,\n collate_fn=collate_fn.collate)\n\n def count_e1_e2_by_relation(self, data):\n rel_map = ddict(int)\n for r in data.keys():\n rel_map[r] = len(data[r])\n count_pairs_by_relation = rel_map.items()\n count_pairs_by_relation = sorted(count_pairs_by_relation, key=lambda x: -x[1])\n return count_pairs_by_relation\n\n # computes the position of a tuple for the flattened 1d score matrix\n def convert_idx_to_1d(self, tuples_r, n=None):\n if n is None:\n n = self.model.num_entities\n pos_1d = []\n row_idx, column_idx = tuples_r\n for i in range(len(row_idx)):\n pos_1d.append(row_idx[i] * n + column_idx[i])\n return pos_1d\n\n\n def evaluate(self, epoch, logger):\n #prepare data\n idx_train = ddict(list)\n for e1, r, e2 in self.train_data:\n idx_train[r].append((e1, e2))\n\n if self.eval_test_data:\n idx_valid = ddict(list)\n for e1, r, e2 in self.valid_data:\n idx_valid[r].append((e1, e2))\n\n idx_test = ddict(list)\n for e1, r, e2 in self.test_data:\n idx_test[r].append((e1, e2))\n\n tuples_by_relation = self.count_e1_e2_by_relation(idx_test)\n\n relations = np.array([x[0] for x in tuples_by_relation])\n #tuples_count = np.array([x[1] for x in tuples_by_relation])\n\n # speedup grid search\n if self.most_frequent_rels > 0:\n print(\"Evaluating on {} most frequent relations...\".format(self.most_frequent_rels))\n relations = relations[:self.most_frequent_rels]\n\n prepare_test = ddict(list)\n for e1, r, e2 in self.test_data:\n prepare_test[r].append([e1, r, e2])\n\n # sorted data\n prepare_test_sorted = ddict(list)\n for r in relations:\n prepare_test_sorted[r].append(prepare_test[r])\n\n eval_data_prepared = [triple_list for r, triple_list in prepare_test_sorted.items()]\n\n ranks_by_r = ddict(list)\n num_true_triples = ddict(list)\n\n\n self.init(eval_data_prepared)\n for i, batch in enumerate(self.eval_loader):\n\n batch = batch.to(self.device)\n r = None\n\n if len(batch.shape) >= 2:\n r_tensor = batch[0][1]\n r = batch[0][1].item()\n\n else:\n # only one test triple for a given relation\n r_tensor = batch[1]\n r = batch[1].item()\n print(\"Evaluating: {} Progress: {}%\".format(r, round(i/len(self.eval_loader) * 100, 2)))\n scores = ddict(list)\n\n score_matrix = self.model.score_matrix_r(r_tensor)\n scores[r].append(score_matrix)\n\n # ----- FILTERING -----\n # all e1, e2 for a given relation in test, validation data\n tuples_r_test = np.array(prepare_test_sorted[r][0])\n tuples_r_test = [tuples_r_test[:,0], tuples_r_test[:,2]]\n\n tuples_r_train = np.array(idx_train[r])\n tuples_r_train = [tuples_r_train[:,0], tuples_r_train[:,1]]\n\n score_matrix[tuples_r_train] = -math.inf # Filter training set out\n\n # Filter validation set out\n if self.eval_test_data:\n tuples_r_valid = np.array(idx_valid[r])\n if (len(tuples_r_valid) > 0):\n tuples_r_valid = 
[tuples_r_valid[:, 0], tuples_r_valid[:, 1]]\n score_matrix[tuples_r_valid] = -math.inf\n\n # ---- /FILTERING -----\n\n test_tuples_r_1d = self.convert_idx_to_1d(tuples_r_test)\n num_true_triples[r] = len(test_tuples_r_1d)\n test_tuples_r_1d_tensor = torch.squeeze(torch.LongTensor([test_tuples_r_1d]))\n topk = self.compute_topk(score_matrix, test_tuples_r_1d_tensor)\n ranks = topk.cpu().data.numpy()\n if len(ranks.shape) > 0:\n ranks = np.sort(ranks)\n print(ranks)\n ranks_by_r[r].append(ranks)\n\n print(\"-----------------------\")\n avg_map, avg_hits = self.metrics(ranks_by_r, num_true_triples)\n\n print(\"TOTAL MAP: {} \".format(avg_map))\n print(\"TOTAL HITS: {}\".format(avg_hits))\n\n # save results\n if logger is not None:\n avg_map = round(avg_map, 4)\n avg_hits = round(avg_hits, 4)\n logger.log_result(avg_map, avg_hits, epoch, \"a\")\n logger.compare_best(avg_map, avg_hits, epoch, \"_best\", self.model)\n\n return avg_map, avg_hits\n\n\n\n def compute_topk(self, score_matrix, tuples_r_1d):\n score_matrix = score_matrix.reshape((1, -1)).flatten()\n\n if len(score_matrix) > self.topk+1:\n sorted_k_values, sorted_k_indexs = torch.topk(score_matrix, self.topk, largest=True, sorted=True)\n\n other = torch.zeros(len(sorted_k_indexs)).long().to(self.device)\n\n tuples_r_1d = tuples_r_1d.to(self.device)\n\n if len(tuples_r_1d.size()) > 0:\n check = [torch.where(sorted_k_indexs == t, sorted_k_indexs, other) for t in tuples_r_1d if len(torch.nonzero(sorted_k_indexs == t)) > 0]\n else:\n check = [torch.where(sorted_k_indexs == tuples_r_1d, sorted_k_indexs, other)]\n\n ranks = [torch.nonzero(t)+1 for t in check]\n if len(ranks) == 1: # one or zero elements in ranks\n ranks = ranks[0] if len(ranks[0].size()) <= 1 else ranks[0][0]\n else:\n ranks = torch.LongTensor(ranks).to(self.device)\n\n return ranks\n\n\n def metrics(self, ranks_by_relation, num_true_triples):\n total_precision = 0\n normalization = 0\n total_hits = 0\n for r, ranks in ranks_by_relation.items():\n total_hits += len(ranks[0])\n normalization += min(num_true_triples[r], self.topk)\n for idx, rank in enumerate(ranks[0]):\n total_precision += (idx + 1) / rank\n\n avg_map = (total_precision / normalization) * 100\n avg_hits = (total_hits / normalization) * 100\n return avg_map, avg_hits\n\n\n @staticmethod\n def fromConfig(model, dataset):\n evaluator = PR()\n if dataset is None:\n evaluator.dataset = dataset.load()\n else:\n evaluator.dataset = dataset\n\n evaluator.device = torch.device(Config.eval_device)\n\n torch.set_num_threads(Config.num_threads)\n evaluator.model = model\n\n coder = Coder()\n data_dir = Config.data_dir\n dataset = Config.dataset\n train_triples = read_triplets(data_dir + Config.dataset + \"/\" + Config.raw_split_files['train'], None)\n train_triples = coder.construct_encoder(train_triples)\n\n test_triples = read_triplets(data_dir + dataset + \"/\" + Config.raw_split_files['test'], coder)\n test_triples = coder.construct_encoder(test_triples)\n\n valid_triples = read_triplets(data_dir + dataset + \"/\" + Config.raw_split_files['valid'], coder)\n valid_triples = coder.construct_encoder(valid_triples)\n\n\n evaluator.train_data = train_triples\n evaluator.eval_test_data = Config.eval_test_data\n\n if Config.eval_test_data: # use test set for evaluation, training and validation split for filtering\n evaluator.test_data = test_triples\n evaluator.valid_data = valid_triples\n else: # use validation set for evaluation and training set for filtering\n evaluator.test_data = valid_triples\n\n 
evaluator.most_frequent_rels = Config.most_frequent_rels\n evaluator.topk = Config.topk\n\n return evaluator\n\n\n\n\n \n",
"step-ids": [
7,
8,
9,
11,
14
]
}
|
[
7,
8,
9,
11,
14
] |