import os

import torch
from torch import cuda, bfloat16
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline, BitsAndBytesConfig, StoppingCriteria, StoppingCriteriaList
from langchain.llms import HuggingFacePipeline
from langchain.vectorstores import FAISS
from langchain.chains import ConversationalRetrievalChain
from langchain.embeddings import HuggingFaceEmbeddings
from sentence_transformers import CrossEncoder
import gradio as gr

HF_TOKEN = os.environ.get("HF_TOKEN", None)
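

# Custom stopping criterion: halts generation once the tail of the generated
# sequence matches any of the stop sequences in stop_token_ids (defined below,
# after the tokenizer is loaded).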
class StopOnTokens(StoppingCriteria):
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        for stop_ids in stop_token_ids:
            if torch.eq(input_ids[0][-len(stop_ids):], stop_ids).all():
                return True
        return False


model_id = 'meta-llama/Meta-Llama-3-8B-Instruct'
device = f'cuda:{cuda.current_device()}' if cuda.is_available() else 'cpu'
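
# Quantization config: load the model in 4-bit NF4 with nested (double)
# quantization and bfloat16 compute.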
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type='nf4',
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=bfloat16
)

tokenizer = AutoTokenizer.from_pretrained(model_id, token=HF_TOKEN)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", token=HF_TOKEN, quantization_config=bnb_config)
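
# Encode the stop sequences with add_special_tokens=False; otherwise the Llama 3
# tokenizer prepends a BOS token and the suffix comparison in StopOnTokens can
# never match.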
stop_list = ['\nHuman:', '\n```\n']
stop_token_ids = [tokenizer(x, add_special_tokens=False)['input_ids'] for x in stop_list]
stop_token_ids = [torch.LongTensor(x).to(device) for x in stop_token_ids]
stopping_criteria = StoppingCriteriaList([StopOnTokens()])
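
# Text-generation pipeline. do_sample=True is required for temperature to take
# effect; without it, transformers falls back to greedy decoding and ignores it.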
generate_text = pipeline(
    model=model,
    tokenizer=tokenizer,
    return_full_text=True,
    task='text-generation',
    stopping_criteria=stopping_criteria,
    do_sample=True,
    temperature=0.1,
    max_new_tokens=512,
    repetition_penalty=1.1
)

llm = HuggingFacePipeline(pipeline=generate_text)
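
# Load the stored FAISS index. This assumes an index was previously built with
# the same embedding model and saved to ./faiss_index (e.g. via
# FAISS.from_documents(docs, embeddings).save_local('faiss_index')). Depending
# on your LangChain version, load_local may also require
# allow_dangerous_deserialization=True.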
try:
    embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2", model_kwargs={"device": device})
    vectorstore = FAISS.load_local('faiss_index', embeddings)
    print("Loaded embeddings from FAISS index successfully")
except ImportError as e:
    print("FAISS could not be imported. Make sure FAISS is installed correctly.")
    raise e
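
# Conversational RAG chain: retrieve from the vector store, generate with the
# quantized Llama 3 pipeline, and return the source documents for re-ranking.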
chain = ConversationalRetrievalChain.from_llm(llm, vectorstore.as_retriever(), return_source_documents=True)

chat_history = []
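
# Cross-encoder used to re-rank the retrieved documents against the raw query.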
reranker = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2')


def format_prompt(query):
    prompt = f"""
You are a knowledgeable assistant with access to a comprehensive database.
I need you to answer my question and provide related information in a specific format.
Here's what I need:
1. A brief, general response to my question based on the related answers retrieved.
2. A JSON-formatted output containing:
   - "question": The original question.
   - "answer": The detailed answer.
   - "related_questions": A list of related questions and their answers, each as a dictionary with the keys:
     - "question": The related question.
     - "answer": The related answer.
Here's my question:
{query}
Include a brief final answer without additional comments, sign-offs, or extra phrases. Be direct and to the point.
"""
    return prompt
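

# Run the RAG chain, then re-rank the retrieved source documents with the
# cross-encoder and return the content of the best match.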
def qa_infer(query):
    formatted_prompt = format_prompt(query)
    results = chain({"question": formatted_prompt, "chat_history": chat_history})

    # Score each retrieved document against the raw query with the cross-encoder.
    documents = results['source_documents']
    query_document_pairs = [[query, doc.page_content] for doc in documents]
    scores = reranker.predict(query_document_pairs)

    # Sort documents by re-ranker score, highest first.
    ranked_docs = sorted(zip(scores, documents), key=lambda x: x[0], reverse=True)

    # Extract the best document (empty string if nothing was retrieved).
    best_doc = ranked_docs[0][1].page_content if ranked_docs else ""

    return best_doc


EXAMPLES = [
    "How to use IPU1_0 instead of A15_0 to process NDK in TDA2x-EVM",
    "Can BQ25896 support I2C interface?",
    "Does TDA2 vout support bt656 8-bit mode?",
]

demo = gr.Interface(fn=qa_infer, inputs="text", allow_flagging='never', examples=EXAMPLES, cache_examples=False, outputs="text")
demo.launch()
|