from datasets import load_dataset, Dataset
from sentence_transformers import SentenceTransformer
import faiss
import time
#import torch
import pandas as pd
from transformers import AutoTokenizer, GenerationConfig #, AutoModelForCausalLM
#from transformers import AutoModelForCausalLM, AutoModel
from transformers import TextIteratorStreamer
from threading import Thread
from ctransformers import AutoModelForCausalLM, AutoConfig, Config #, AutoTokenizer
from huggingface_hub import Repository, upload_file
import os
HF_TOKEN = os.getenv('HF_Token')
#Log_Path="./Logfolder"
logfile = 'DiabetesChatLog.txt'
historylog = [{
"Prompt": '',
"Output": ''
}]
data = load_dataset("Namitg02/Test", split='train', streaming=False)
# Returns a datasets.Dataset object containing the rows of the train split
length = len(data)
embedding_model = SentenceTransformer("all-MiniLM-L6-v2")
embedding_dim = embedding_model.get_sentence_embedding_dimension()
# Returns the dimensionality of the sentence embeddings
index = faiss.IndexFlatL2(embedding_dim)
data.add_faiss_index("embeddings", custom_index=index)
# builds a FAISS search index over the dataset's "embeddings" column
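# NOTE (assumption): add_faiss_index requires the dataset to already contain an "embeddings" column.
# If the Namitg02/Test split does not ship one, it could be created first, e.g. (illustrative; "text" is a
# placeholder for whichever column holds the passage text):
# data = data.map(lambda row: {"embeddings": embedding_model.encode(row["text"]).tolist()})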
print("check1")
#question = "How can I reverse Diabetes?"
SYS_PROMPT = """You are an assistant for answering questions.
You are given the extracted parts of documents and a question. Provide a conversational answer.
If you don't know the answer, just say "I do not know." Don't make up an answer."""
# Provides context of how to answer the question
llm_model = "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF"
# Alternatives tried: TheBloke/Llama-2-7B-Chat-GGML, TinyLlama/TinyLlama-1.1B-Chat-v1.0, microsoft/Phi-3-mini-4k-instruct, health360/Healix-1.1B-V1-Chat-dDPO
# TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF and tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf not working; TinyLlama/TinyLlama-1.1B-Chat-v0.6, andrijdavid/TinyLlama-1.1B-Chat-v1.0-GGUF
tokenizer = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0")
# Load the tokenizer; the GGUF model itself is loaded below with ctransformers
generation_config = AutoConfig.from_pretrained(
    "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF",
    max_new_tokens=300,
    # do_sample=True,
    # stream=streamer,
    top_p=0.95,
    temperature=0.4,
    stream=True
    # eos_token_id=terminators
)
# send additional parameters to model for generation
model = AutoModelForCausalLM.from_pretrained(
    llm_model,
    model_file="tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf",
    model_type="llama",
    gpu_layers=0,
    config=generation_config
)
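# Q4_K_M is a 4-bit quantized GGUF weight file; gpu_layers=0 keeps every layer on the CPU (no GPU offload)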
def search(query: str, k: int = 2):
    """a function that embeds a new query and returns the most probable results"""
    embedded_query = embedding_model.encode(query)  # create an embedding of the new query
    scores, retrieved_examples = data.get_nearest_examples(  # retrieve results
        "embeddings", embedded_query,  # compare the embedded query with the dataset embeddings
        k=k  # get only the top k results
    )
    return scores, retrieved_examples
# returns scores (List[float]): the retrieval scores from FAISS (IndexFlatL2 by default), and retrieved_examples (dict): the matching rows keyed by column name
# called by the talk function, which passes the user prompt
#print(scores, retrieved_examples)
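# Example usage (illustrative query): scores, docs = search("What foods raise blood sugar?", k=2)
# docs maps each dataset column name to a list of the k nearest rows; with IndexFlatL2 the scores are L2 distances (lower = closer)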
def format_prompt(prompt, retrieved_documents, k):
    """using the retrieved documents we will prompt the model to generate our responses"""
    PROMPT = f"Question:{prompt}\nContext:"
    for idx in range(k):
        PROMPT += f"{retrieved_documents['0'][idx]}\n"  # '0' is the column key used for the passage text in this dataset
    return PROMPT
# Called by the talk function to add retrieved documents to the prompt; keeps appending the text of each retrieved document to the string
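# For k=2 the returned prompt looks roughly like:
# "Question:<user question>\nContext:<retrieved passage 1>\n<retrieved passage 2>\n"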
def talk(prompt, history):
    k = 2  # number of retrieved documents
    scores, retrieved_documents = search(prompt, k)  # get retrieval scores and examples (dict) based on the prompt passed
    print(retrieved_documents.keys())
    print("check4")
    formatted_prompt = format_prompt(prompt, retrieved_documents, k)  # create a new prompt using the retrieved documents
    print("check5")
    print(retrieved_documents['0'])
    print(formatted_prompt)
    formatted_prompt = formatted_prompt[:600]  # truncate to avoid memory issues
    print(formatted_prompt)
    messages = [{"role": "system", "content": SYS_PROMPT}, {"role": "user", "content": formatted_prompt}]
    # binds the system context and the new prompt for the LLM
    # the chat template structure should match the text generation model's expected format
    print("check6")
    streamer = TextIteratorStreamer(
        tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True
    )
    # stores print-ready text in a queue, to be consumed downstream as an iterator; removes special tokens from generated text
    # timeout applies to the text queue; the tokenizer is used for decoding tokens
    # called by generate_kwargs
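    # NOTE: this streamer is only referenced by the commented-out transformers generate() path below;
    # the active ctransformers loop further down decodes tokens directly, so the streamer is currently unused.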
    terminators = [
        tokenizer.eos_token_id,  # End-of-Sequence token id that indicates where the model should consider the text sequence complete
        tokenizer.convert_tokens_to_ids("<|eot_id|>")  # converts a token string to a single integer id using the vocabulary
    ]
    # indicates the end of a sequence; only used by the commented-out transformers generate() path below
    # (note: "<|eot_id|>" may not exist in the TinyLlama vocabulary)
    input_ids = tokenizer.apply_chat_template(
        messages,
        add_generation_prompt=True
    )
    # preparing tokens for model input: without return_tensors this is a plain Python list of token ids,
    # which is what ctransformers' generate() expects (it does not accept torch tensors)
    # the add_generation_prompt argument tells the template to add tokens that indicate the start of a bot response
    # print(input_ids)
    # print("check7")
    # print(input_ids.dtype)
    # generate_kwargs = dict(
    #     tokens=input_ids,
    #     streamer=streamer,
    #     do_sample=True,
    #     eos_token_id=terminators,
    # )
    # outputs = model.generate(
    # )
    # print(outputs)
    # calling the model to generate a response based on the message/input
    # do_sample, if set to True, uses strategies to select the next token from the probability distribution over the entire vocabulary
    # temperature controls randomness: higher temperature gives more randomness
    # only the tokens comprising the top_p probability mass are considered for responses
    # The output is a data structure containing all the information returned by generate(), which can also be used as a tuple or dictionary.
    #
    # print("check10")
    # t = Thread(target=model.generate, kwargs=generate_kwargs)
    # to process multiple instances
    # t.start()
    # print("check11")
    # start a thread
    outputs = []
    print(messages)
    print(*messages)
    # input_ids = tokenizer(*messages)
    # print(model.generate(tensor([[1, 529, 29989, 5205, 29989]])))  # debug line; needs torch.tensor, which is not imported
    start = time.time()
    NUM_TOKENS = 0
    print('-'*4+'Start Generation'+'-'*4)
    for token in model.generate(input_ids):
        piece = model.detokenize(token)  # decode the newly generated token to text
        print(piece, end='', flush=True)
        outputs.append(piece)
        NUM_TOKENS += 1
    time_generate = time.time() - start
    print('\n')
    print('-'*4+'End Generation'+'-'*4)
    print(f'Num of generated tokens: {NUM_TOKENS}')
    print(f'Time for complete generation: {time_generate}s')
    print(f'Tokens per second: {NUM_TOKENS/time_generate}')
    print(f'Time per token: {(time_generate/NUM_TOKENS)*1000}ms')
    #outputtokens = model.generate(input_ids)
    print("check9")
    #print(outputtokens)
    #outputs = model.detokenize(outputtokens, decode = True)
    #print(outputs)
    # for token in model.generate(input_ids):
    #     print(model.detokenize(token))
    #     outputs.append(model.detokenize(token))
    #     output = model.detokenize(token)
    # print(outputs)
    # yield "".join(outputs)
    # print("check12")
    pd.options.display.max_colwidth = 800
    print("check13")
    # outputstring = ''.join(outputs)
    # global historylog
    # historynew = {
    #     "Prompt": prompt,
    #     "Output": outputstring
    # }
    # historylog.append(historynew)
    # return historylog
    # print(historylog)
    return "".join(outputs)  # hand the completed answer back to the Gradio ChatInterface
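# Note: gr.ChatInterface also accepts a generator function, so a streaming variant of talk() could yield the
# partial answer as it grows instead of returning it at the end, e.g. (sketch, inside the generation loop):
#     partial = ""
#     for token in model.generate(input_ids):
#         partial += model.detokenize(token)
#         yield partial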
TITLE = "AI Copilot for Diabetes Patients"
DESCRIPTION = "I provide answers to questions and concerns related to diabetes"
import gradio as gr
# Design chatbot
demo = gr.ChatInterface(
    fn=talk,
    chatbot=gr.Chatbot(
        show_label=True,
        show_share_button=True,
        show_copy_button=True,
        likeable=True,
        layout="bubble",
        bubble_full_width=False,
    ),
    theme="Soft",
    examples=[["what is Diabetes? "]],
    title=TITLE,
    description=DESCRIPTION,
)
# Launching the chatbot calls the talk function for each user message, which in turn calls the other functions
print("check14")
#print(historylog)
#memory_panda = pd.DataFrame(historylog)
#Logfile = Dataset.from_pandas(memory_panda)
#Logfile.push_to_hub("Namitg02/Logfile",token = HF_TOKEN)
demo.launch()