Update app.py
app.py
CHANGED
@@ -10,22 +10,10 @@ app = FastAPI()
 
 # Load models
 model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
-question_model = "deepset/tinyroberta-squad2"
-nlp = pipeline('question-answering', model=question_model, tokenizer=question_model)
 
 summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
 
 
-class ModifyQueryRequest_v3(BaseModel):
-    query_string_list: List[str]
-
-
-class T5QuestionRequest(BaseModel):
-    context: str
-
-class T5Response(BaseModel):
-    answer: str
-
 # API endpoints
 @app.post("/modify_query")
 async def modify_query(request: Request):
@@ -46,44 +34,13 @@ async def modify_query_v3(request: Request):
     except Exception as e:
         raise HTTPException(status_code=500, detail=f"Error in modifying query v3: {str(e)}")
 
-@app.post("/answer_question")
-async def answer_question(request: Request):
-    try:
-        raw_data = await request.json()
-        res_locs = []
-        context_string = ''
-        corpus_embeddings = model.encode(raw_data['context'], convert_to_tensor=True)
-        query_embeddings = model.encode(raw_data['question'], convert_to_tensor=True)
-        hits = util.semantic_search(query_embeddings, corpus_embeddings)
-
-        # Collect relevant contexts
-        for hit in hits[0]:
-            if hit['score'] > 0.4:
-                loc = hit['corpus_id']
-                res_locs.append(raw_data['locations'][loc])
-                context_string += raw_data['context'][loc] + ' '
-
-        # If no relevant contexts are found
-        if not res_locs:
-            answer = "Sorry, I couldn't find any results for your query. Please try again!"
-        else:
-            # Use the question-answering pipeline
-            QA_input = {
-                'question': raw_data['question'],
-                'context': context_string.replace('\n', ' ')
-            }
-            result = nlp(QA_input)
-            answer = result['answer']
-
-        return JSONResponse(content={'answer':answer, "location":res_locs})
-    except Exception as e:
-        raise HTTPException(status_code=500, detail=f"Error in answering question: {str(e)}")
 
-@app.post("/
-async def
+@app.post("/makeanswer")
+async def makeAnswer(request: Request):
     try:
         # Summarize the context
-
+        raw_data = await request.json()
+        response = summarizer(raw_data['context'], max_length=130, min_length=30, do_sample=False)
         return T5Response(answer=response[0]["summary_text"])
     except Exception as e:
         raise HTTPException(status_code=500, detail=f"Error in T5 summarization: {str(e)}")
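For reference, a minimal client-side sketch of how the new /makeanswer endpoint could be called once this change is deployed. The base URL and port are assumptions (not part of this diff); the "context" request field and the "answer" response field come from the handler added above.

# Minimal client sketch for the new /makeanswer endpoint.
# Assumption: the FastAPI app is served locally on port 8000 (not specified in this diff).
import requests

payload = {
    "context": (
        "FastAPI is a modern, fast web framework for building APIs with Python. "
        "It is based on standard Python type hints and generates interactive docs automatically."
    )
}

# The handler reads the raw JSON body, summarizes payload["context"] with the BART summarizer,
# and returns {"answer": "<summary_text>"} on success or a 500 with a detail message on failure.
resp = requests.post("http://localhost:8000/makeanswer", json=payload)
resp.raise_for_status()
print(resp.json()["answer"])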