import faiss
import gradio as gr
import numpy as np
import pandas as pd
import spacy
import wikipedia
from transformers import AutoTokenizer, TFAutoModel
from wikipedia.exceptions import DisambiguationError, PageError

# Load the small English spaCy pipeline, downloading it on first run.
try:
    nlp = spacy.load("en_core_web_sm")
except OSError:
    spacy.cli.download("en_core_web_sm")
    nlp = spacy.load("en_core_web_sm")
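
# Question words to drop from the extracted noun chunks.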
wh_words = ['what', 'who', 'how', 'when', 'which']

def get_concepts(text):
    """Extract noun-chunk 'concepts' from a question, skipping bare wh-words."""
    doc = nlp(text.lower())
    concepts = []
    for chunk in doc.noun_chunks:
        if chunk.text not in wh_words:
            concepts.append(chunk.text)
    return concepts
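
# Illustrative only; the exact chunks depend on the spaCy model version:
# get_concepts("Who is Aamir Khan?") -> ["aamir khan"]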

def get_passages(text, k=100):
    """Split text into passages of roughly k tokens, on sentence boundaries."""
    doc = nlp(text)
    passages = []
    passage = ""
    passage_len = 0
    for sen in doc.sents:
        passage_len += len(sen)  # len() of a spaCy Span is its token count
        if passage_len >= k:
            # The current passage is full: flush it and start a new one.
            if passage:
                passages.append(passage)
            passage = sen.text
            passage_len = len(sen)
        else:
            passage = (passage + " " + sen.text).strip()
    if passage:
        # Flush the final, possibly partial, passage.
        passages.append(passage)
    return passages
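
# For example: get_passages(article_text, k=100) yields strings of roughly
# 100 tokens, each starting and ending on a sentence boundary.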

def get_dicts_for_dpr(concepts, n_results=20, k=100):
    """Fetch Wikipedia pages for each concept and chunk them into passage dicts."""
    dicts = []
    for concept in concepts:
        wikis = wikipedia.search(concept, results=n_results)
        print(concept, "No. of Wikis:", len(wikis))
        for wiki in wikis:
            try:
                html_page = wikipedia.page(title=wiki, auto_suggest=False)
            except (DisambiguationError, PageError):
                # Skip titles that do not resolve to a single readable page.
                continue
            for passage in get_passages(html_page.content, k=k):
                dicts.append({'text': passage, 'title': wiki})
    return dicts
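
# Lightweight DPR encoders: per their names, these nlpconnect checkpoints are
# 2-layer, 128-hidden BERTs, small enough to run the whole pipeline on CPU.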
passage_encoder = TFAutoModel.from_pretrained("nlpconnect/dpr-ctx_encoder_bert_uncased_L-2_H-128_A-2")
query_encoder = TFAutoModel.from_pretrained("nlpconnect/dpr-question_encoder_bert_uncased_L-2_H-128_A-2")
p_tokenizer = AutoTokenizer.from_pretrained("nlpconnect/dpr-ctx_encoder_bert_uncased_L-2_H-128_A-2")
q_tokenizer = AutoTokenizer.from_pretrained("nlpconnect/dpr-question_encoder_bert_uncased_L-2_H-128_A-2")

def get_title_text_combined(passage_dicts):
    # The DPR context tokenizer encodes (title, text) pairs.
    return [(p['title'], p['text']) for p in passage_dicts]

def extracted_passage_embeddings(processed_passages, max_length=156):
    passage_inputs = p_tokenizer.batch_encode_plus(
        processed_passages,
        add_special_tokens=True,
        truncation=True,
        padding="max_length",
        max_length=max_length,
        return_token_type_ids=True,
    )
    passage_embeddings = passage_encoder.predict(
        [np.array(passage_inputs['input_ids']),
         np.array(passage_inputs['attention_mask']),
         np.array(passage_inputs['token_type_ids'])],
        batch_size=64,
        verbose=1,
    )
    return passage_embeddings

def extracted_query_embeddings(queries, max_length=64):
    query_inputs = q_tokenizer.batch_encode_plus(
        queries,
        add_special_tokens=True,
        truncation=True,
        padding="max_length",
        max_length=max_length,
        return_token_type_ids=True,
    )
    query_embeddings = query_encoder.predict(
        [np.array(query_inputs['input_ids']),
         np.array(query_inputs['attention_mask']),
         np.array(query_inputs['token_type_ids'])],
        batch_size=1,
        verbose=1,
    )
    return query_embeddings
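
# The question and context encoders project into the same 128-dimensional
# space, so query vectors can be searched directly against passage vectors.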

def search(question):
    concepts = get_concepts(question)
    print("concepts:", concepts)
    dicts = get_dicts_for_dpr(concepts, n_results=1)
    print("dicts len:", len(dicts))
    processed_passages = get_title_text_combined(dicts)
    passage_embeddings = extracted_passage_embeddings(processed_passages)
    query_embeddings = extracted_query_embeddings([question])
    # Exact L2 search over the pooled 128-d embeddings.
    faiss_index = faiss.IndexFlatL2(128)
    faiss_index.add(passage_embeddings.pooler_output)
    # Cap k at the index size so FAISS never returns -1 placeholder ids.
    top_k = min(10, faiss_index.ntotal)
    distances, indices = faiss_index.search(query_embeddings.pooler_output, k=top_k)
    return pd.DataFrame([dicts[i] for i in indices[0]])
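
# e.g. search("Who is Aamir Khan?") returns a DataFrame with 'text' and
# 'title' columns for the passages closest to the question embedding.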

# Gradio UI (legacy gr.inputs/gr.outputs API).
inp = gr.inputs.Textbox(lines=2, default="Who is Aamir Khan?", label="Question")
out = gr.outputs.Dataframe(label="Answers")
gr.Interface(fn=search, inputs=inp, outputs=out).launch()