Commit: Delete qgen_server.py
This commit removes the file qgen_server.py entirely (38 lines deleted, 0 added).
The full contents of the deleted file are reproduced below.
@@ -1,38 +0,0 @@
|
|
1 |
-
|
2 |
-
# Stdlib and third-party imports for the question-generation endpoint.
import os
from flask import request, jsonify
from transformers import pipeline

# Define a writable directory for the model cache
# Falls back to /tmp/.cache when XDG_CACHE_HOME is unset, so the process can
# run in environments without a writable home directory (e.g. containers).
cache_dir = os.path.join(os.getenv("XDG_CACHE_HOME", "/tmp/.cache"), "huggingface_models")
os.makedirs(cache_dir, exist_ok=True)

print("Loading Question Generation model (iarfmoose/t5-base-question-generator)...")
# Initialize the pipeline for text2text-generation with the specified model
# NOTE(review): this runs at import time — the first run downloads the model
# into cache_dir, which can take a while and requires network access.
qg_model = pipeline("text2text-generation", model="iarfmoose/t5-base-question-generator", model_kwargs={"cache_dir": cache_dir})
print("Question Generation model loaded.")
|
14 |
-
|
15 |
-
def handle_generate_questions():
    """Flask view: generate questions from the JSON body's "text" field.

    Expects a JSON payload of the form ``{"text": "..."}``.  On success
    responds with ``{"questions": [...]}``; on a missing/invalid payload
    returns a 400, and on any generation failure returns a 500 with the
    error message.
    """
    payload = request.get_json()
    # Guard: reject requests with no JSON body or without the required field.
    if not payload or 'text' not in payload:
        return jsonify({'error': 'Invalid request. "text" field is required.'}), 400

    source_text = payload['text']

    # Prepend the text with "generate questions: " as required by this model.
    # NOTE(review): the iarfmoose model card documents an <answer>/<context>
    # input format — confirm this prefix actually suits this checkpoint.
    prompt = f"generate questions: {source_text}"

    try:
        # Beam-search generation over the prompt.
        outputs = qg_model(prompt, max_length=64, num_beams=4, early_stopping=True)

        # The model emits a single string with questions joined by '<sep>';
        # split it apart and drop empty fragments.
        raw_output = outputs[0]['generated_text']
        questions = []
        for candidate in raw_output.split('<sep>'):
            candidate = candidate.strip()
            if candidate:
                questions.append(candidate)

        print(f"Generated questions for text: '{source_text[:50]}...' -> {questions}")

        return jsonify({'questions': questions})
    except Exception as e:
        # Boundary handler: log and convert any failure into a JSON 500.
        print(f"Error during question generation: {e}")
        return jsonify({'error': str(e)}), 500
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|