import os
import pprint
import time  # used by the commented-out timing scaffold near the bottom

import faiss
import gradio as gr
import pandas as pd  # used by the commented-out chat-log export at the bottom
from ctransformers import AutoConfig  # generation parameters for the GGUF model
from datasets import Dataset, load_dataset  # Dataset is used by the commented-out log export
from llama_cpp import Llama
from sentence_transformers import SentenceTransformer
from transformers import AutoTokenizer
HF_TOKEN = os.getenv('HF_Token')  # Hub token, used by the commented-out log upload at the bottom
#Log_Path="./Logfolder"
logfile = 'DiabetesChatLog.txt'
historylog = [{
    "Prompt": '',
    "Output": ''
}]
data = load_dataset("Namitg02/Test", split='train', streaming=False)
# Returns a datasets.Dataset; each row behaves like a dictionary of column values.
length = len(data)
embedding_model = SentenceTransformer("all-MiniLM-L6-v2")
embedding_dim = embedding_model.get_sentence_embedding_dimension()
# Returns the dimensionality of the embeddings (384 for all-MiniLM-L6-v2)
index = faiss.IndexFlatL2(embedding_dim)
data.add_faiss_index("embeddings", custom_index=index)
# Builds a FAISS index over the dataset's existing "embeddings" column
print("check1")
#question = "How can I reverse Diabetes?"
SYS_PROMPT = """You are an assistant for answering questions.
You are given the extracted parts of documents and a question. Provide a conversational answer.
If you don't know the answer, just say "I do not know." Don't make up an answer."""
# Provides context of how to answer the question
llm_model = "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF"
# Alternatives tried: TheBloke/Llama-2-7B-Chat-GGML, TinyLlama/TinyLlama-1.1B-Chat-v1.0,
# microsoft/Phi-3-mini-4k-instruct, health360/Healix-1.1B-V1-Chat-dDPO,
# TinyLlama/TinyLlama-1.1B-Chat-v0.6, andrijdavid/TinyLlama-1.1B-Chat-v1.0-GGUF.
# TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF with tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf was not working.
tokenizer = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0")
# Load the tokenizer matching the chat model (used below for terminator token ids)
generation_config = AutoConfig.from_pretrained(
    "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF",
    max_new_tokens=300,
    # do_sample=True,
    # stream=streamer,
    top_p=0.95,
    temperature=0.4,
    stream=True
    # eos_token_id=terminators
)
# Additional generation parameters; only relevant to the commented-out ctransformers
# path below, since the llama_cpp model created next takes its own parameters.
# Alternative ctransformers path (not used; would need ctransformers.AutoModelForCausalLM imported):
# model = AutoModelForCausalLM.from_pretrained(
#     llm_model, model_file="tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf",
#     model_type="llama", gpu_layers=0, config=generation_config)
model = Llama(
    model_path="./tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf",
    chat_format="llama-2",
    n_gpu_layers=0,  # CPU-only inference
    temperature=0.75,
    max_tokens=500,
    top_p=0.95,
    # callback_manager=callback_manager,
    # verbose=True,  # verbose is required to pass to the callback manager
)
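# A quick way to sanity-check the model outside the Gradio app is a single,
# non-streamed chat completion. Kept commented out so it does not run at
# startup; the question string is only an example:
#
# out = model.create_chat_completion(
#     messages=[{"role": "system", "content": SYS_PROMPT},
#               {"role": "user", "content": "What is type 2 diabetes?"}],
#     max_tokens=128,
# )
# print(out["choices"][0]["message"]["content"])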
def search(query: str, k: int = 2):
    """Embed a new query and return the k most similar dataset rows."""
    embedded_query = embedding_model.encode(query)  # create an embedding of the new query
    scores, retrieved_examples = data.get_nearest_examples(  # retrieve results
        "embeddings", embedded_query,  # compare the embedded query with the dataset embeddings
        k=k  # keep only the top k results
    )
    return scores, retrieved_examples
# Returns scores (List[float]), the retrieval distances from FAISS (IndexFlatL2 here),
# and retrieved_examples (dict), the matching rows keyed by column name.
# Called by the talk function, which passes the user prompt.
#print(scores, retrieved_examples)
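# Example usage (commented out; results depend on the dataset contents):
# scores, docs = search("How can I manage blood sugar?", k=2)
# print(scores)      # two L2 distances, smaller = closer match
# print(docs['0'])   # the matching rows from the dataset's text column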
def format_prompt(prompt, retrieved_documents, k):
    """Combine the user question and the retrieved documents into a single prompt."""
    PROMPT = f"Question:{prompt}\nContext:"
    for idx in range(k):
        PROMPT += f"{retrieved_documents['0'][idx]}\n"
    return PROMPT
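# The resulting prompt has the shape sketched below ('0' is this dataset's text
# column name; the document text depends on what retrieval returns):
#
# Question:<user question>
# Context:<retrieved document 1>
# <retrieved document 2>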
#def add_history(formatted_prompt, history, memory_limit=3):
#    # always keep len(history) <= memory_limit
#    if len(history) > memory_limit:
#        history = history[-memory_limit:]
#    if len(history) == 0:
#        return PROMPT + f"{formatted_prompt} [/INST]"
#    formatted_message = PROMPT + f"{history[0][0]} [/INST] {history[0][1]} </s>"
#    # Handle conversation history
#    for user_msg, model_answer in history[1:]:
#        formatted_message += f"<s>[INST] {user_msg} [/INST] {model_answer} </s>"
#    # Handle the current message
#    formatted_message += f"<s>[INST] {formatted_prompt} [/INST]"
#    return formatted_message
# Would be called by the talk function to add past turns and the retrieved documents
# to the prompt; it keeps appending the retrieved-document text to the prompt string.
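# A minimal working sketch of the history idea above, assuming Gradio-style
# history (a list of (user_msg, model_answer) pairs) and Llama-2 [INST] tags.
# Defined here for illustration but not wired into talk below:
def add_history_sketch(formatted_prompt, history, memory_limit=3):
    # keep only the most recent turns to bound the prompt length
    history = history[-memory_limit:]
    message = ""
    for user_msg, model_answer in history:
        message += f"<s>[INST] {user_msg} [/INST] {model_answer} </s>"
    # append the current (already formatted) prompt as the newest turn
    message += f"<s>[INST] {formatted_prompt} [/INST]"
    return message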
def talk(prompt, history):
    k = 2  # number of documents to retrieve
    scores, retrieved_documents = search(prompt, k)  # retrieval scores and example rows for the given prompt
    print(retrieved_documents.keys())
    print("check4")
    formatted_prompt = format_prompt(prompt, retrieved_documents, k)  # build a new prompt from the retrieved documents
    print("check5")
    print(retrieved_documents['0'])
    print(formatted_prompt)
    # formatted_prompt_with_history = add_history(formatted_prompt, history)
    # formatted_prompt_with_history = formatted_prompt_with_history[:600]  # to avoid memory issues
    # print(formatted_prompt_with_history)
    # The messages below bind the system context and the new prompt for the LLM;
    # the chat template structure should match the text-generation model's format.
    print("check6")
    terminators = [
        tokenizer.eos_token_id,  # end-of-sequence token id: where the model should consider the text complete
        tokenizer.convert_tokens_to_ids("<|eot_id|>")  # converts a token string to its integer id via the vocabulary
    ]
    # Marks the end of a sequence (currently unused: the llama_cpp call below stops on "</s>" instead)
    stream = model.create_chat_completion(
        messages=[{"role": "system", "content": SYS_PROMPT},
                  {"role": "user", "content": formatted_prompt}],
        max_tokens=1000, stop=["</s>"], stream=True
    )
    # in a non-streamed call the text would be output['choices'][0]['message']['content']
    pprint.pprint(stream)  # prints the generator object itself, not the chunks
    text = ""
    for output in stream:
        # each streamed chunk carries a delta; only some chunks contain new text
        delta = output['choices'][0].get('delta', {})
        if 'content' in delta:
            text += delta['content']
            yield text
# Generation notes:
# - add_generation_prompt tells a chat template to append the tokens that start a bot response
# - do_sample=True samples the next token from the probability distribution over the whole vocabulary
# - temperature controls randomness: more randomness with higher temperature
# - only the tokens comprising the top_p probability mass are considered for responses
# - generate() returns a structure with all outputs, also usable as a tuple or dictionary
# Commented-out scaffold for timing generation:
# start = time.time()
# NUM_TOKENS = 0
# time_generate = time.time() - start
# print('\n')
# print('-'*4 + 'End Generation' + '-'*4)
# print(f'Num of generated tokens: {NUM_TOKENS}')
# print(f'Time for complete generation: {time_generate}s')
# print(f'Tokens per second: {NUM_TOKENS/time_generate}')
# print(f'Time per token: {(time_generate/NUM_TOKENS)*1000}ms')
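# For reference, each chunk yielded by create_chat_completion(stream=True)
# roughly follows llama-cpp-python's OpenAI-compatible streaming format; the
# field values below are illustrative:
#
# {"choices": [{"index": 0,
#               "delta": {"content": "Dia"},  # absent on the first/last chunks
#               "finish_reason": None}]}
#
# which is why talk() accumulates delta["content"] and yields the running text.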
TITLE = "AI Copilot for Diabetes Patients"
DESCRIPTION = "I provide answers to concerns related to Diabetes"
# Build the chatbot interface (gradio is imported at the top)
demo = gr.ChatInterface(
    fn=talk,
    chatbot=gr.Chatbot(
        show_label=True,
        show_share_button=True,
        show_copy_button=True,
        likeable=True,
        layout="bubble",
        bubble_full_width=False,
    ),
    theme="soft",
    examples=[["What is Diabetes?"]],
    title=TITLE,
    description=DESCRIPTION,
)
# Launching the chatbot calls talk on each message, which in turn runs retrieval and generation
print("check14")
#print(historylog)
#memory_panda = pd.DataFrame(historylog)
#Logfile = Dataset.from_pandas(memory_panda)
#Logfile.push_to_hub("Namitg02/Logfile", token=HF_TOKEN)
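# A minimal sketch of the commented-out log export above: convert historylog to
# a Dataset and push it to the Hub. Defined but never called here, and it
# assumes HF_TOKEN grants write access to the Namitg02/Logfile repo:
def push_chat_log_sketch():
    memory_panda = pd.DataFrame(historylog)   # one row per prompt/output pair
    logfile_ds = Dataset.from_pandas(memory_panda)
    logfile_ds.push_to_hub("Namitg02/Logfile", token=HF_TOKEN)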
demo.launch()