from datasets import load_dataset, Dataset
from sentence_transformers import SentenceTransformer
import faiss
import time
from datetime import datetime
import json
#import torch
import uuid
import pandas as pd
from llama_cpp import Llama
#from langchain_community.llms import LlamaCpp
from threading import Thread
from huggingface_hub import Repository, upload_file
import os
HF_TOKEN = os.getenv('HF_Token')
#Log_Path="./Logfolder"
logfile = 'DiabetesChatLog.txt'
data = load_dataset("Namitg02/Test", split='train', streaming=False)
# Returns a datasets.Dataset; each row behaves like a dictionary of column -> value
length = len(data)  # number of rows in the dataset
embedding_model = SentenceTransformer("all-MiniLM-L6-v2")
embedding_dim = embedding_model.get_sentence_embedding_dimension()
# Returns the dimensionality of the embeddings (384 for all-MiniLM-L6-v2)
index = faiss.IndexFlatL2(embedding_dim)
data.add_faiss_index("embeddings", custom_index=index)
# Builds a FAISS index (exact L2 search) over the dataset's existing "embeddings" column
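# Quick sanity check (a minimal sketch; the custom index is populated from the
# column above, so its size should match the number of dataset rows):
print(f"FAISS index holds {index.ntotal} vectors for {length} rows")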
#question = "How can I reverse Diabetes?"
SYS_PROMPT = """You are an assistant that answers questions like a medical professional.
You are given extracted parts of a document, a question, and a history of questions and answers. Provide a brief conversational answer.
If you do not know the answer, just say "I do not know." Do not make up an answer. Don't repeat the SYS_PROMPT or say that you are referring to a document or an article."""
# Provides context for how to answer the question
# Models tried previously (GGUF chat variants):
# TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF (tinyllama-1.1b-chat-v1.0.Q5_K_M.gguf),
# TinyLlama/TinyLlama-1.1B-Chat-v0.6, andrijdavid/TinyLlama-1.1B-Chat-v1.0-GGUF
model = Llama(
    model_path="./llama-2-7b-chat.Q4_K_M.gguf",
    # chat_format="llama-2",
    n_gpu_layers=0,    # run entirely on CPU
    n_ctx=4096,        # context window size in tokens
    # Note: temperature/top_p are sampling parameters; in llama-cpp-python they
    # are usually passed per create_chat_completion call rather than here
    temperature=0.75,
    top_p=0.95,
    # eos_tokens=terminators
    # callback_manager=callback_manager,
    # verbose=True,  # verbose is required to pass to the callback manager
)
# Initialize the llama.cpp model (the GGUF file bundles its own tokenizer)
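# Quick smoke test of the model (commented out; a minimal sketch with an
# illustrative prompt, assuming the GGUF file above loaded successfully):
#   out = model.create_chat_completion(
#       messages=[{"role": "user", "content": "Say hello."}], max_tokens=16)
#   print(out['choices'][0]['message']['content'])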
def search(query: str, k: int = 2):
    """Embed a new query and return the k most similar dataset rows."""
    embedded_query = embedding_model.encode(query)  # create an embedding of the new query
    scores, retrieved_examples = data.get_nearest_examples(  # retrieve results
        "embeddings", embedded_query,  # compare the embedded query with the dataset embeddings
        k=k  # get only the top k results
    )
    return scores, retrieved_examples
# Returns scores (List[float]), the FAISS retrieval scores (L2 distances for the default IndexFlatL2),
# and retrieved_examples (dict), mapping column names to the k matching values.
# Called by the talk function, which passes the user prompt.
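# Example usage (illustrative question; the returned dict is keyed by the
# dataset's column names, e.g. '0' for the text column used below):
#   scores, docs = search("What are the symptoms of Type 2 diabetes?", k=2)
#   print(scores)        # L2 distances, lower = more similar
#   print(docs['0'][0])  # text of the closest document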
def format_prompt(prompt, retrieved_documents, k, history, memory_limit=3):
    """Use the retrieved documents and recent history to build the prompt for the model."""
    PROMPT = f"Question:{prompt}\nContext:"
    for idx in range(k):
        PROMPT += f"{retrieved_documents['0'][idx]}\n"  # '0' is the text column of this dataset
    if len(history) == 0:
        return PROMPT
    if len(history) > memory_limit:
        history = history[-memory_limit:]  # keep only the most recent turns
    # Append the conversation history in llama-2 chat format
    for user_message, bot_message in history:
        PROMPT += f"<s>[INST] {user_message} [/INST] {bot_message} </s>"
    return PROMPT
# Called by the talk function to append retrieved document text and recent history to the prompt string.
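# Sketch of the assembled prompt for k=2 documents and one prior turn
# (placeholder text; the real document strings come from the dataset's '0' column):
#
#   Question:<new question>
#   Context:<text of document 1>
#   <text of document 2>
#   <s>[INST] <earlier user message> [/INST] <earlier bot reply> </s>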
def talk(prompt, history):
    k = 2  # number of retrieved documents
    scores, retrieved_documents = search(prompt, k)  # retrieval scores and examples based on the prompt passed
    formatted_prompt = format_prompt(prompt, retrieved_documents, k, history, memory_limit=3)  # new prompt using the retrieved documents
    pd.options.display.max_colwidth = 4000
    # Bind the system context and the new prompt for the LLM; the message structure
    # follows the chat template expected by the text generation model
    messages = [{"role": "system", "content": SYS_PROMPT}, {"role": "user", "content": formatted_prompt}]
    output = model.create_chat_completion(messages=messages, max_tokens=400, stop=["</s>"], stream=False)
    response = output['choices'][0]['message']['content']
    # Log the question and answer to a uniquely named file
    historylog = ''
    historylog += f"{prompt} \n {response} "
    try:
        unique_filename = f"file_{uuid.uuid4()}.txt"
        # timestamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
        # filename = f"file_{timestamp}.txt"
        with open(unique_filename, "a") as log_file:  # named log_file to avoid shadowing the global dataset `data`
            log_file.write(historylog)
        print("History log printed:")
        with open(unique_filename, "r") as log_file:
            print(log_file.read())
    except IOError as e:
        print(f"An error occurred: {e}")
    # from huggingface_hub import HfApi
    # api = HfApi()
    # api.upload_file(
    #     path_or_fileobj="./file.txt",
    #     path_in_repo="file.txt",
    #     repo_id="Namitg02/Test",
    #     repo_type="space"
    # )
    # Yield progressively longer prefixes of the response so Gradio renders a typing effect
    for i in range(len(response)):
        time.sleep(0.05)
        yield response[: i + 1]
# create_chat_completion returns a dict; the generated text is under choices[0]['message']['content'].
# temperature controls randomness: higher temperature means more randomness.
# Only the tokens comprising the top_p probability mass are considered for responses.
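# Call contract assumed from gr.ChatInterface semantics: Gradio invokes
# talk(prompt, history) with history as a list of [user_message, bot_message]
# pairs, and because talk is a generator, each yielded prefix replaces the
# displayed bot message in the chat window.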
TITLE = "AI Copilot for Diabetes Patients"
DESCRIPTION = "I provide answers to concerns related to Diabetes"
import gradio as gr
# Build the chatbot UI
demo = gr.ChatInterface(
    fn=talk,
    chatbot=gr.Chatbot(
        show_label=True,
        show_share_button=True,
        show_copy_button=True,
        likeable=True,
        layout="bubble",
        bubble_full_width=False,
    ),
    theme="soft",  # built-in Gradio theme names are lowercase
    examples=[["what is Diabetes?"]],
    title=TITLE,
    description=DESCRIPTION,
)
# Launch the chatbot; Gradio calls the talk function, which in turn calls the other functions
#memory_panda = pd.DataFrame(historylog)
#Logfile = Dataset.from_pandas(memory_panda)
#Logfile.push_to_hub("Namitg02/Logfile",token = HF_TOKEN)
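# A minimal sketch for persisting a log with the upload_file helper imported
# above (commented out; repo id and type mirror the commented HfApi block, and
# it assumes the logfile exists on disk and HF_TOKEN grants write access):
#   upload_file(
#       path_or_fileobj=logfile,
#       path_in_repo=logfile,
#       repo_id="Namitg02/Test",
#       repo_type="space",
#       token=HF_TOKEN,
#   )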
demo.launch()
#demo.launch(auth=("namit", "wolfmagic")) |