import gradio as gr
import transformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import accelerate
import einops
import langchain
import xformers
import os
import bitsandbytes
import sentence_transformers
import huggingface_hub
import torch
from torch import cuda, bfloat16
from transformers import StoppingCriteria, StoppingCriteriaList
from langchain.llms import HuggingFacePipeline
from langchain.document_loaders import TextLoader, DirectoryLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.chains import ConversationalRetrievalChain
from huggingface_hub import InferenceClient
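# Gradio demo: retrieval-augmented Q&A over local text files.
# The app loads Meta-Llama-3-8B-Instruct in 4-bit, indexes documents from data2/text/
# in a FAISS vector store, and answers questions through a ConversationalRetrievalChain.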
# Login to Hugging Face using a token
# huggingface_hub.login(HF_TOKEN)
"""
Loading of the LLama3 model
"""
HF_TOKEN = os.environ.get("HF_TOKEN", None)
model_id = 'meta-llama/Meta-Llama-3-8B-Instruct'
device = f'cuda:{cuda.current_device()}' if cuda.is_available() else 'cpu'
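# Use the current CUDA device if a GPU is available, otherwise fall back to CPU.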
"""set quantization configuration to load large model with less GPU memory
this requires the `bitsandbytes` library"""
bnb_config = transformers.BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type='nf4',
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=bfloat16
)
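# NF4 4-bit weights with double quantization, dequantized to bfloat16 for compute,
# cut the weight memory to roughly a quarter of full bfloat16 precision.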
tokenizer = AutoTokenizer.from_pretrained(model_id, token=HF_TOKEN)
model = AutoModelForCausalLM.from_pretrained(
    model_id, device_map="auto", token=HF_TOKEN, quantization_config=bnb_config
)  # device_map="auto" handles GPU placement, so no explicit .to("cuda:0") is needed
terminators = [
    tokenizer.eos_token_id,
    tokenizer.convert_tokens_to_ids("<|eot_id|>")
]
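# `terminators` collects Llama 3's end-of-sequence ids (<|eot_id|> marks the end of a turn);
# note that it is defined here but not passed to the generation pipeline below.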
"""CPU"""
# model_config = transformers.AutoConfig.from_pretrained(
# model_id,
# token=HF_TOKEN,
# # use_auth_token=hf_auth
# )
# model = transformers.AutoModelForCausalLM.from_pretrained(
# model_id,
# trust_remote_code=True,
# config=model_config,
# # quantization_config=bnb_config,
# token=HF_TOKEN,
# # use_auth_token=hf_auth
# )
# model.eval()
# tokenizer = transformers.AutoTokenizer.from_pretrained(
# model_id,
# token=HF_TOKEN,
# # use_auth_token=hf_auth
# )
# generate_text = transformers.pipeline(
# model=model, tokenizer=tokenizer,
# return_full_text=True,
# task='text-generation',
# temperature=0.01,
# max_new_tokens=512
# )
"""
Setting up the stop list to define stopping criteria.
"""
stop_list = ['\nHuman:', '\n```\n']
stop_token_ids = [tokenizer(x, add_special_tokens=False)['input_ids'] for x in stop_list]  # skip the BOS token so the ids can match generated text
stop_token_ids = [torch.LongTensor(x).to(device) for x in stop_token_ids]
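# Each stop string is tokenized and moved to the model's device so its ids can be
# compared directly against the tail of the generated sequence.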
# define custom stopping criteria object
class StopOnTokens(StoppingCriteria):
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        for stop_ids in stop_token_ids:
            if torch.eq(input_ids[0][-len(stop_ids):], stop_ids).all():
                return True
        return False
stopping_criteria = StoppingCriteriaList([StopOnTokens()])
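# The pipeline calls StopOnTokens after every generated token and halts generation
# as soon as one of the stop sequences appears at the end of the output.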
generate_text = transformers.pipeline(
    model=model,
    tokenizer=tokenizer,
    return_full_text=True,  # langchain expects the full text
    task='text-generation',
    # we pass generation parameters here too
    stopping_criteria=stopping_criteria,  # without this the model rambles during chat
    temperature=0.1,  # 'randomness' of outputs, 0.0 is the min and 1.0 the max
    max_new_tokens=512,  # max number of tokens to generate in the output
    repetition_penalty=1.1  # without this the output begins repeating
)
llm = HuggingFacePipeline(pipeline=generate_text)
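# HuggingFacePipeline wraps the transformers pipeline so LangChain chains can call it
# like any other LLM object.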
loader = DirectoryLoader('data2/text/', loader_cls=TextLoader)
documents = loader.load()
print('Number of documents loaded:', len(documents))
text_splitter = RecursiveCharacterTextSplitter(chunk_size=5000, chunk_overlap=250)
all_splits = text_splitter.split_documents(documents)
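# 5000-character chunks with 250 characters of overlap; the overlap keeps text that
# straddles a chunk boundary available to both neighbouring chunks during retrieval.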
print(all_splits[0])  # sanity check: inspect the first chunk
print("#########################################")
model_name = "sentence-transformers/all-mpnet-base-v2"
model_kwargs = {"device": "cuda"}
embeddings = HuggingFaceEmbeddings(model_name=model_name, model_kwargs=model_kwargs)
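# all-mpnet-base-v2 produces 768-dimensional sentence embeddings; the same model embeds
# both the document chunks and, later, the incoming queries.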
# storing embeddings in the vector store
vectorstore = FAISS.from_documents(all_splits, embeddings)
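# FAISS holds the chunk embeddings in an in-memory index; retrieval is nearest-neighbour
# search over these vectors. The index could optionally be persisted with
# vectorstore.save_local(...), e.g. vectorstore.save_local("faiss_index") (path is illustrative).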
chain = ConversationalRetrievalChain.from_llm(llm, vectorstore.as_retriever(), return_source_documents=True)
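# ConversationalRetrievalChain condenses the question with the chat history, retrieves the
# most similar chunks from FAISS, and stuffs them into the prompt sent to the LLM.
# return_source_documents=True exposes the retrieved chunks in result["source_documents"].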
chat_history = []
def format_prompt(query):
    # Construct a clear and structured prompt to guide the LLM's response
    prompt = f"""
    You are a knowledgeable assistant with access to a comprehensive database.
    I need you to answer my question and provide related information in a specific format.
    Here's what I need:
    1. A brief, general response to my question based on related answers retrieved.
    2. A JSON-formatted output containing:
       - "question": The original question.
       - "answer": The detailed answer.
       - "related_questions": A list of related questions and their answers, each as a dictionary with the keys:
         - "question": The related question.
         - "answer": The related answer.
    Here's my question:
    {query}
    Include a brief final answer without additional comments, sign-offs, or extra phrases. Be direct and to the point.
    """
    return prompt
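# Illustrative example: format_prompt("Can BQ25896 support I2C interface?") returns the
# instruction block above with that question substituted for {query}.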
def qa_infer(query):
    formatted_prompt = format_prompt(query)
    result = chain({"question": formatted_prompt, "chat_history": chat_history})
    return result['answer']
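# chat_history stays empty, so every query is answered independently; the retrieved source
# chunks are available in result['source_documents'] but only the answer text is returned.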
# query = "What` is the best TS pin configuration for BQ24040 in normal battery charge mode"
# qa_infer(query)
EXAMPLES = [" How to use IPU1_0 instead of A15_0 to process NDK in TDA2x-EVM",
"Can BQ25896 support I2C interface?",
"Does TDA2 vout support bt656 8-bit mode?"]
demo = gr.Interface(fn=qa_infer, inputs="text",allow_flagging='never', examples=EXAMPLES,
cache_examples=False,outputs="text")
# launch the app!
#demo.launch(enable_queue = True,share=True)
#demo.queue(default_enabled=True).launch(debug=True,share=True)
demo.launch()