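# Minimal Streamlit front end for keyword search over Elasticsearch:
# take a free-text question, run a "more_like_this" query against an
# index named "document", and display the raw JSON response.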
import json
import os

import requests
import streamlit as st

# Elasticsearch base URL, e.g. "http://localhost:9200" (set in the environment).
ES_URL = os.environ.get("ES_URL")

question = 'What is the capital of the Netherlands?'
query_text = 'Query used for the keyword search (you can also edit it and experiment with the responses)'
written_question = st.text_input(query_text, question)
if written_question:
    question = written_question
if st.button('Run keyword search'):
    if question:
        try:
            # Build a "more_like_this" query matching documents whose "content"
            # field resembles the free-text question.  A simpler alternative is
            # a plain match query, e.g. {"query": {"match": {"content": "moldova"}}}.
            url = f"{ES_URL}/document/_search?pretty"
            payload = json.dumps({"query": {
                "more_like_this": {
                    "like": question,
                    "fields": ["content"],
                    "min_term_freq": 1,  # must be an integer; keep it low so short questions are not filtered out
                    "min_doc_freq": 4,
                    "max_query_terms": 50,
                }
            }})
            headers = {'Content-Type': 'application/json'}
            # Elasticsearch accepts a JSON body on GET _search requests.
            response = requests.get(url, headers=headers, data=payload)
            qa_result = response.json()
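            # qa_result now holds the standard Elasticsearch response shape:
            # {"hits": {"hits": [{"_score": ..., "_source": {"content": ...}}, ...]}}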
            
        except Exception as e:
            qa_result = str(e)

        # if "answer" in qa_result.keys():
        #     answer_span, answer_score = qa_result["answer"], qa_result["score"]
        #     st.write(f'Answer: **{answer_span}**')
        #     start_par, stop_para = max(0, qa_result["start"]-86), min(qa_result["end"]+90, len(paragraph))
        #     answer_context = paragraph[start_par:stop_para].replace(answer_span, f'**{answer_span}**')
        #     st.write(f'Answer context (and score): ... _{answer_context}_ ... (score: {format(answer_score, ".3f")})')
        
        st.write('Answer JSON:')
        st.write(qa_result)
    else:
        st.write('Enter a query to run the keyword search')
        st.stop()
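# To run locally (assumes an Elasticsearch index named "document" with a
# "content" field; the filename "app.py" is illustrative):
#   ES_URL=http://localhost:9200 streamlit run app.py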

"""
result_first_two_hits = result['hits']['hits'][:2]
# (score, snippet) pairs for the two best hits, for quick inspection.
question_similarity = [(hit['_score'], hit['_source']['content'][:200])
                       for hit in result_first_two_hits]

top_hit = result['hits']['hits'][0]
context = top_hit['_source']['content']
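# Use the top-ranked document's text as the QA context; the commented block
# below is a hard-coded alternative for testing.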
# context = r" Extractive Question Answering is the task of extracting   
# an answer from a text given a question. An example of a question  
# answering dataset is the SQuAD dataset, which is entirely based   
# on that task. If you would like to fine-tune a model on a SQuAD task,  
# you may leverage the `examples/pytorch/question-answering/run_squad.py` script."
question = input  # `input` is the request parameter of the original endpoint
# Example questions: "What is extractive question answering?"
# "What is a good example of a question answering dataset?"
print(question)
context = context[:5000]  # truncate so the context fits the QA model's input
print(context)
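# pipe_exqa is defined elsewhere in the original project; a plausible
# construction (an assumption, not shown in this file) would be:
#   from transformers import pipeline
#   pipe_exqa = pipeline("question-answering")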
try:
    # The QA pipeline returns a dict with "answer", "score", "start", and "end".
    qa_result = pipe_exqa(question=question, context=context)
except Exception as e:
    return {"output": str(e)}

answer = qa_result['answer']
score = round(qa_result['score'], 4)
span = f"start: {qa_result['start']}, end: {qa_result['end']}"
# st.write(answer); st.write(f"score: {score}"); st.write(f"span: {span}")
output = f"{answer} \n {score} \n {span}"

return {"output": output} or {"output": str(question_similarity)} or result or {"Hello": "World!"}
"""