# FastAPI backend for a quiz workbook: chapter listing, true/false questions
# (with base64-inlined images) from SQLite, and OpenAI-generated hints.
import base64 | |
import uvicorn | |
from contextlib import asynccontextmanager | |
import sqlite3 | |
from fastapi import FastAPI, HTTPException | |
from fastapi.responses import JSONResponse | |
from pydantic import BaseModel | |
from openai import OpenAI | |
import os | |
# OpenAI API client. Pass the key explicitly to the constructor — the
# original constructed OpenAI() (which resolves OPENAI_API_KEY itself and
# raises if absent) and then redundantly reassigned client.api_key.
client = OpenAI(api_key=os.getenv('OPENAI_API_KEY'))
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Application lifespan: log startup, yield control, log shutdown.

    FastAPI's ``lifespan=`` parameter expects an async *context-manager*
    factory. Without ``@asynccontextmanager`` (imported above but never
    applied) this is a bare async generator function and the app fails at
    startup.
    """
    print("startup event")
    yield
    print("shutdown event")
# Application instance wired to the lifespan handler defined above.
app = FastAPI(lifespan=lifespan)

# Path to the SQLite database holding the `chapter` and `question` tables.
DATABASE = 'workbook.db'
def get_db():
    """Return the process-wide SQLite connection, creating it on first use.

    The connection is cached on ``app.state``. FastAPI runs sync endpoints
    in a worker thread pool, so the cached connection is touched from
    multiple threads; sqlite3's default ``check_same_thread=True`` would
    raise ``ProgrammingError`` on any request served by a thread other than
    the one that opened the connection, hence the explicit override.
    """
    db = getattr(app.state, '_database', None)
    if db is None:
        db = app.state._database = sqlite3.connect(
            DATABASE, check_same_thread=False
        )
    return db
def get_chapters():
    """List every chapter with its name and its number of questions.

    Chapters without questions are still listed (LEFT JOIN) with a count
    of zero. Returns a JSON array of objects.
    """
    cursor = get_db().cursor()
    cursor.execute("""
        SELECT c.chapter, c.chapter_name, COUNT(q.question) as n_questions
        FROM chapter c
        LEFT JOIN question q ON c.chapter = q.chapter
        GROUP BY c.chapter, c.chapter_name
    """)
    keys = ("chapter_number", "chapter_name", "n_questions")
    payload = [dict(zip(keys, record)) for record in cursor.fetchall()]
    return JSONResponse(content=payload)
class ChapterRequest(BaseModel):
    """Request payload identifying a chapter by its number."""
    chapter_number: int  # matches `chapter.chapter` in the database
def _encode_image(filename):
    """Return the base64 (UTF-8 str) of an image under ./image.

    Returns an empty string when *filename* is None (question has no image).
    A missing file still propagates as an error, same as before.
    """
    if filename is None:
        return ''
    image_path = os.path.join('./image', filename)
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode('utf-8')


def get_questions_by_chapter(request: ChapterRequest):
    """Return all true/false questions ('正誤問題') for the given chapter.

    Each entry carries the question text, options, answer, explanation, and
    the image inlined as base64 so the client needs no extra fetches.
    Raises HTTPException 404 when the chapter has no matching questions.
    """
    cursor = get_db().cursor()
    cursor.execute(
        "SELECT question, options, answer, explain, image FROM question "
        "WHERE chapter = ? AND kind = '正誤問題'",
        (request.chapter_number,),
    )
    questions = cursor.fetchall()
    if not questions:
        raise HTTPException(status_code=404, detail="Questions not found for the given chapter number")
    question_list = [
        {
            "question_text": row[0],
            "options": row[1],
            "answer": row[2],
            "explanation": row[3],
            "image": _encode_image(row[4]),
        }
        for row in questions
    ]
    return JSONResponse(content=question_list)
class HintRequest(BaseModel):
    """Request payload with everything needed to generate a hint."""
    question_text: str  # the question statement shown to the user
    options: str        # answer options, as a single string
    answer: str         # the correct answer (never echoed back directly)
def generate_hint(request: HintRequest):
    """Generate a hint for a question via the OpenAI chat API.

    Builds a Japanese prompt from the question, options and correct answer,
    asks gpt-4o-mini for a hint, and returns ``{"hint": ...}``. Any SDK or
    network failure is surfaced as an HTTP 500 carrying the error message.

    Fix: removed the leftover ``print(prompt)`` debug statement, which
    leaked the full question and correct answer to stdout on every call.
    """
    prompt = f"設問: {request.question_text}\n選択肢: {request.options}\n正解: {request.answer}\nこの設問に対するヒントを生成してください。なおレスポンスはヒント文章のみとせよ。冒頭に「ヒント:」などの見出しをつけるな。"
    try:
        # Only the API call can raise here; keep the try body minimal.
        response = client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[
                {
                    "role": "user",
                    "content": [{'type': 'text', 'text': prompt}],
                },
            ],
        )
    except Exception as e:
        # Boundary handler: map any failure to a clean HTTP 500.
        raise HTTPException(status_code=500, detail=str(e))
    return JSONResponse(content={"hint": response.choices[0].message.content})
if __name__ == '__main__':
    # Local development entry point; debug logging to observe each request.
    uvicorn.run(app, host="127.0.0.1", port=8000, log_level="debug")