import os

import gradio as gr
import torch
from torch import cuda, bfloat16

import transformers
from transformers import AutoTokenizer, AutoModelForCausalLM
from transformers import StoppingCriteria, StoppingCriteriaList

# Not used directly below; importing them surfaces missing dependencies at startup.
import accelerate
import einops
import xformers
import bitsandbytes
import sentence_transformers
import huggingface_hub
from huggingface_hub import InferenceClient

import langchain
from langchain.llms import HuggingFacePipeline
from langchain.document_loaders import TextLoader, DirectoryLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.chains import ConversationalRetrievalChain

"""
Loading of the Llama 3 model.
"""

HF_TOKEN = os.environ.get("HF_TOKEN", None)

# Instruct-tuned Llama 3 8B checkpoint, used for both the tokenizer and the model below.
model_id = 'meta-llama/Meta-Llama-3-8B-Instruct'

device = f'cuda:{cuda.current_device()}' if cuda.is_available() else 'cpu'

"""
Set the quantization configuration so the large model can be loaded with less GPU memory;
this requires the `bitsandbytes` library.
"""
bnb_config = transformers.BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type='nf4',
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=bfloat16
)
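# Rough intuition for the settings above: NF4 stores the weights as 4-bit NormalFloat,
# double quantization also compresses the quantization constants, and matmuls run in
# bfloat16. For an 8B-parameter model this should bring the weight footprint down to
# roughly 4-6 GB of GPU memory (approximate figure, not measured here).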

tokenizer = AutoTokenizer.from_pretrained(model_id, token=HF_TOKEN)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",
    token=HF_TOKEN,
    quantization_config=bnb_config
)

# Llama 3 marks the end of an assistant turn with <|eot_id|> in addition to the regular
# EOS token, so both are treated as terminators during generation.
terminators = [
    tokenizer.eos_token_id,
    tokenizer.convert_tokens_to_ids("<|eot_id|>")
]
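
# Optional sanity check (kept commented out): a minimal direct generation call, assuming
# the quantized model loaded successfully, to confirm generation stops at the Llama 3
# terminators before the model is wired into LangChain.
# prompt_ids = tokenizer("What is an I2C interface?", return_tensors="pt").to(model.device)
# output_ids = model.generate(**prompt_ids, max_new_tokens=64, eos_token_id=terminators)
# print(tokenizer.decode(output_ids[0], skip_special_tokens=True))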

"""
Setting up the stop list to define the stopping criteria.
"""

stop_list = ['\nHuman:', '\n```\n']

# Tokenize each stop string without the special tokens the Llama 3 tokenizer would
# normally prepend, so the token-id patterns can be matched against the generated tail.
stop_token_ids = [tokenizer(x, add_special_tokens=False)['input_ids'] for x in stop_list]
stop_token_ids = [torch.LongTensor(x).to(device) for x in stop_token_ids]

class StopOnTokens(StoppingCriteria):
    """Stop generation as soon as the tail of the output matches any stop sequence."""

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        for stop_ids in stop_token_ids:
            if torch.eq(input_ids[0][-len(stop_ids):], stop_ids).all():
                return True
        return False


stopping_criteria = StoppingCriteriaList([StopOnTokens()])

generate_text = transformers.pipeline(
    model=model,
    tokenizer=tokenizer,
    return_full_text=True,
    task='text-generation',
    stopping_criteria=stopping_criteria,
    eos_token_id=terminators,  # stop on <|eot_id|> as well as the default EOS token
    do_sample=True,            # sampling must be enabled for temperature to take effect
    temperature=0.1,
    max_new_tokens=512,
    repetition_penalty=1.1
)

llm = HuggingFacePipeline(pipeline=generate_text)
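
# Quick check of the LangChain wrapper (commented out, illustrative only): calling the
# wrapped pipeline directly should return plain generated text.
# print(llm("Explain what a vector store is in one sentence."))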

loader = DirectoryLoader('data2/text/', loader_cls=TextLoader)
documents = loader.load()
print('Number of loaded documents:', len(documents))

text_splitter = RecursiveCharacterTextSplitter(chunk_size=5000, chunk_overlap=250)
all_splits = text_splitter.split_documents(documents)
print(all_splits[0])
print("#########################################")

model_name = "sentence-transformers/all-mpnet-base-v2"
model_kwargs = {"device": device}  # reuse the cuda/cpu device detected above

embeddings = HuggingFaceEmbeddings(model_name=model_name, model_kwargs=model_kwargs)

vectorstore = FAISS.from_documents(all_splits, embeddings)
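
# Illustrative retrieval check (commented out): the query below is taken from the
# EXAMPLES list further down; similarity_search returns the chunks the chain will see.
# for doc in vectorstore.similarity_search("Does TDA2 vout support bt656 8-bit mode?", k=3):
#     print(doc.metadata.get("source"), doc.page_content[:200])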

chain = ConversationalRetrievalChain.from_llm(llm, vectorstore.as_retriever(), return_source_documents=True)

# Chat history passed to the chain on every call; it is kept empty here, so each query
# is answered independently of previous ones.
chat_history = []

def format_prompt(query):
    prompt = f"""
You are a knowledgeable assistant with access to a comprehensive database.
I need you to answer my question and provide related information in a specific format.

Here's what I need:
1. A brief, general response to my question based on related answers retrieved.
2. A JSON-formatted output containing:
   - "question": The original question.
   - "answer": The detailed answer.
   - "related_questions": A list of related questions and their answers, each as a dictionary with the keys:
     - "question": The related question.
     - "answer": The related answer.

Here's my question:
{query}

Include a brief final answer without additional comments, sign-offs, or extra phrases. Be direct and to the point.
"""
    return prompt

def qa_infer(query):
    """Run the retrieval-augmented chain on a single query and return the answer text."""
    formatted_prompt = format_prompt(query)
    result = chain({"question": formatted_prompt, "chat_history": chat_history})
    return result['answer']
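
# Example invocation (commented out; the question comes from the EXAMPLES list below):
# print(qa_infer("Can BQ25896 support I2C interface?"))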

EXAMPLES = [
    "How to use IPU1_0 instead of A15_0 to process NDK in TDA2x-EVM",
    "Can BQ25896 support I2C interface?",
    "Does TDA2 vout support bt656 8-bit mode?"
]

demo = gr.Interface(
    fn=qa_infer,
    inputs="text",
    outputs="text",
    examples=EXAMPLES,
    cache_examples=False,
    allow_flagging='never'
)

demo.launch()