Namitg02 committed on
Commit b0d53fc · verified · 1 Parent(s): bee7d75

Update app.py

Files changed (1)
  1. app.py +110 -113
app.py CHANGED
@@ -1,19 +1,17 @@
  from datasets import load_dataset
  from datasets import Dataset
- #from langchain.docstore.document import Document as LangchainDocument
- # from langchain.memory import ConversationBufferMemory
  from sentence_transformers import SentenceTransformer
  import faiss
  import time
  #import torch
  import pandas as pd

- from transformers import AutoTokenizer, AutoModelForCausalLM
  from transformers import TextIteratorStreamer
  from threading import Thread
- #from ctransformers import AutoModelForCausalLM, AutoConfig, Config, AutoTokenizer

- #from huggingface_hub import InferenceClient
  from huggingface_hub import Repository, upload_file
  import os

@@ -26,16 +24,6 @@ historylog = [{
      "Output": ''
  }]

- llm_model = "TinyLlama/TinyLlama-1.1B-Chat-v0.6"
-
-
- # TheBloke/Llama-2-7B-Chat-GGML , TinyLlama/TinyLlama-1.1B-Chat-v1.0 , microsoft/Phi-3-mini-4k-instruct, health360/Healix-1.1B-V1-Chat-dDPO
- # TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF and tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf not working
-
- model = AutoModelForCausalLM.from_pretrained(llm_model)
- tokenizer = AutoTokenizer.from_pretrained(llm_model)
- #initiate model and tokenizer
-
  data = load_dataset("Namitg02/Test", split='train', streaming=False)
  #Returns a list of dictionaries, each representing a row in the dataset.
  length = len(data)
@@ -49,7 +37,7 @@ index = faiss.IndexFlatL2(embedding_dim)
  data.add_faiss_index("embeddings", custom_index=index)
  # adds an index column for the embeddings

- print("check1d")
  #question = "How can I reverse Diabetes?"

  SYS_PROMPT = """You are an assistant for answering questions.
@@ -57,18 +45,25 @@ You are given the extracted parts of documents and a question. Provide a convers
  If you don't know the answer, just say "I do not know." Don't make up an answer."""
  # Provides context of how to answer the question


- print("check2")
-
- # memory = ConversationBufferMemory(return_messages=True)
-

- terminators = [
-     tokenizer.eos_token_id, # End-of-Sequence token that indicates where the model should consider the text sequence complete
-     tokenizer.convert_tokens_to_ids("<|eot_id|>") # Converts a token string into a single integer id (or a sequence of ids) using the vocabulary
- ]
- # indicates the end of a sequence


  def search(query: str, k: int = 2 ):
      """a function that embeds a new query and returns the most probable results"""
@@ -82,8 +77,6 @@ def search(query: str, k: int = 2 ):
  # called by talk function that passes prompt

  #print(scores, retrieved_examples)
- print("check2A")
-

  def format_prompt(prompt,retrieved_documents,k):
      """using the retrieved documents we will prompt the model to generate our responses"""
@@ -94,109 +87,114 @@ def format_prompt(prompt,retrieved_documents,k):

  # Called by talk function to add retrieved documents to the prompt. Keeps adding text of the retrieved documents to a string.

- print("check3")
-
  def talk(prompt, history):
      k = 2 # number of retrieved documents
      scores , retrieved_documents = search(prompt, k) # get retrieval scores and examples in dictionary format based on the prompt passed
      print(retrieved_documents.keys())
      formatted_prompt = format_prompt(prompt,retrieved_documents,k) # create a new prompt using the retrieved documents
      print(retrieved_documents['0'])
      print(formatted_prompt)
      formatted_prompt = formatted_prompt[:600] # to avoid memory issue
-     # print(retrieved_documents['0'][1]
-     # print(retrieved_documents['0'][2]
      print(formatted_prompt)
      messages = [{"role":"system","content":SYS_PROMPT},{"role":"user","content":formatted_prompt}]
      # binding the system context and new prompt for LLM
      # the chat template structure should be based on text generation model format
-     print("check3B")
-     input_ids = tokenizer.apply_chat_template(
-         messages,
-         add_generation_prompt=True,
-         return_tensors="pt"
-     ).to(model.device)
-     # tell the model to generate
-     # add_generation_prompt argument tells the template to add tokens that indicate the start of a bot response
-     print("check3C")
-     outputs = model.generate(
-         input_ids,
-         max_new_tokens=300,
-         eos_token_id=terminators,
-         do_sample=True,
-         temperature=0.4,
-         top_p=0.95,
      )
      # calling the model to generate response based on message/ input
      # do_sample if set to True uses strategies to select the next token from the probability distribution over the entire vocabulary
      # temperature controls randomness. more randomness with higher temperature
      # only the tokens comprising the top_p probability mass are considered for responses
      # This output is a data structure containing all the information returned by generate(), but that can also be used as tuple or dictionary.
-     print("check3D")
-     streamer = TextIteratorStreamer(
-         tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True
-     )
-     # stores print-ready text in a queue, to be used by a downstream application as an iterator. removes special tokens in generated text.
-     # timeout for text queue. tokenizer for decoding tokens
-     # called by generate_kwargs
-     print("check3E")
-     generate_kwargs = dict(
-         input_ids= input_ids,
-         streamer=streamer,
-         max_new_tokens= 200,
-         do_sample=True,
-         top_p=0.95,
-         temperature=0.4,
-         eos_token_id=terminators,
-     )
-     # send additional parameters to model for generation
-     print("check3F")
-     t = Thread(target=model.generate, kwargs=generate_kwargs)
      # to process multiple instances
-     t.start()
      # start a thread
-     print("check3G")
-     outputs = []
-     for text in streamer:
-         outputs.append(text)
-         print(outputs)
-         yield "".join(outputs)
-     print("check3H")

      pd.options.display.max_colwidth = 800
-
-     outputstring = ''.join(outputs)
-
-     global historylog
-     historynew = {
-         "Prompt": prompt,
-         "Output": outputstring
-     }
-     historylog.append(historynew)
-     return historylog
-     print(historylog)
-
-     # history.update({prompt: outputstring})
-     # print(history)
-     #print(memory_string2)
-     #with open(logfile, 'a', encoding='utf-8') as f:
-     # f.write(memory_string2)
-     # f.write('\n')
-     #f.close()
-     #print(logfile)
-     #logfile.push_to_hub("Namitg02/",token = HF_TOKEN)
-     #memory_panda = pd.DataFrame()
-     #if len(memory_panda) == 0:
-     # memory_panda = pd.DataFrame(memory_string)
-     #else:
-     # memory_panda = memory_panda.append(memory_string, ignore_index=True)
-     #print(memory_panda.iloc[[0]])

-     #memory_panda.loc[len(memory_panda.index)] = ['prompt', outputstring]
-     #print(memory_panda.iloc[[1]])
-     #Logfile = Dataset.from_pandas(memory_panda)
-     #Logfile.push_to_hub("Namitg02/Logfile",token = HF_TOKEN)
-

  TITLE = "AI Copilot for Diabetes Patients"

@@ -218,12 +216,11 @@ demo = gr.ChatInterface(
      examples=[["what is Diabetes? "]],
      title=TITLE,
      description=DESCRIPTION,
-
  )
  # launch chatbot and calls the talk function which in turn calls other functions
- print("check3I")
- print(historylog)
- memory_panda = pd.DataFrame(historylog)
- Logfile = Dataset.from_pandas(memory_panda)
- Logfile.push_to_hub("Namitg02/Logfile",token = HF_TOKEN)
  demo.launch()
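Note: the block removed in the hunk above is the standard transformers streaming pattern, in which generate() runs on a background thread while TextIteratorStreamer yields decoded text as it is produced. A minimal, self-contained sketch of that pattern follows for reference; the checkpoint is the one named in the removed lines, and the hard-coded question is purely illustrative, not the app's retrieval-augmented prompt.

from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

model_id = "TinyLlama/TinyLlama-1.1B-Chat-v0.6"  # checkpoint named in the removed lines
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

messages = [
    {"role": "system", "content": "You are an assistant for answering questions."},
    {"role": "user", "content": "What is diabetes?"},  # illustrative question only
]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
generate_kwargs = dict(
    input_ids=input_ids, streamer=streamer,
    max_new_tokens=200, do_sample=True, top_p=0.95, temperature=0.4,
)

Thread(target=model.generate, kwargs=generate_kwargs).start()  # generation runs in the background
for text in streamer:  # the streamer yields decoded text pieces as they become available
    print(text, end="", flush=True)

The commit instead switches to a quantized GGUF checkpoint loaded through ctransformers, as the new version of the file below shows.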
 
The corresponding regions of app.py after this change (added lines marked with +, unchanged context between hunks omitted):

  from datasets import load_dataset
  from datasets import Dataset
  from sentence_transformers import SentenceTransformer
  import faiss
  import time
  #import torch
  import pandas as pd

+ from transformers import AutoTokenizer, GenerationConfig #, AutoModelForCausalLM
+ #from transformers import AutoModelForCausalLM, AutoModel
  from transformers import TextIteratorStreamer
  from threading import Thread
+ from ctransformers import AutoModelForCausalLM, AutoConfig, Config #, AutoTokenizer

  from huggingface_hub import Repository, upload_file
  import os

      "Output": ''
  }]

  data = load_dataset("Namitg02/Test", split='train', streaming=False)
  #Returns a list of dictionaries, each representing a row in the dataset.
  length = len(data)

  data.add_faiss_index("embeddings", custom_index=index)
  # adds an index column for the embeddings

+ print("check1")
  #question = "How can I reverse Diabetes?"

  SYS_PROMPT = """You are an assistant for answering questions.

  If you don't know the answer, just say "I do not know." Don't make up an answer."""
  # Provides context of how to answer the question

+ llm_model = "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF"
+ # TheBloke/Llama-2-7B-Chat-GGML , TinyLlama/TinyLlama-1.1B-Chat-v1.0 , microsoft/Phi-3-mini-4k-instruct, health360/Healix-1.1B-V1-Chat-dDPO
+ # TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF and tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf not working, TinyLlama/TinyLlama-1.1B-Chat-v0.6, andrijdavid/TinyLlama-1.1B-Chat-v1.0-GGUF"

+ tokenizer = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0")
+ #initiate model and tokenizer

+ generation_config = AutoConfig.from_pretrained(
+     "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF",
+     max_new_tokens= 300,
+     # do_sample=True,
+     # stream = streamer,
+     top_p=0.95,
+     temperature=0.4
+     # eos_token_id=terminators
+ )
+ # send additional parameters to model for generation

+ model = AutoModelForCausalLM.from_pretrained(llm_model, model_file = "tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf", model_type="llama", gpu_layers=0, config = generation_config)

  def search(query: str, k: int = 2 ):
      """a function that embeds a new query and returns the most probable results"""
 

  # called by talk function that passes prompt

  #print(scores, retrieved_examples)

  def format_prompt(prompt,retrieved_documents,k):
      """using the retrieved documents we will prompt the model to generate our responses"""

  # Called by talk function to add retrieved documents to the prompt. Keeps adding text of the retrieved documents to a string.

  def talk(prompt, history):
      k = 2 # number of retrieved documents
      scores , retrieved_documents = search(prompt, k) # get retrieval scores and examples in dictionary format based on the prompt passed
      print(retrieved_documents.keys())
+     print("check4")
      formatted_prompt = format_prompt(prompt,retrieved_documents,k) # create a new prompt using the retrieved documents
+     print("check5")
      print(retrieved_documents['0'])
      print(formatted_prompt)
      formatted_prompt = formatted_prompt[:600] # to avoid memory issue
      print(formatted_prompt)
      messages = [{"role":"system","content":SYS_PROMPT},{"role":"user","content":formatted_prompt}]
      # binding the system context and new prompt for LLM
      # the chat template structure should be based on text generation model format
+     print("check6")
+
+     streamer = TextIteratorStreamer(
+         tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True
      )
+     # stores print-ready text in a queue, to be used by a downstream application as an iterator. removes special tokens in generated text.
+     # timeout for text queue. tokenizer for decoding tokens
+     # called by generate_kwargs
+
+     terminators = [
+         tokenizer.eos_token_id, # End-of-Sequence token that indicates where the model should consider the text sequence complete
+         tokenizer.convert_tokens_to_ids("<|eot_id|>") # Converts a token string into a single integer id (or a sequence of ids) using the vocabulary
+     ]
+     # indicates the end of a sequence
+
+     # input_ids = tokenizer.apply_chat_template(
+     #     "hello",
+     #     add_generation_prompt=True,
+     #     return_tensors="pt"
+     # )
+     # preparing tokens for model input
+     # add_generation_prompt argument tells the template to add tokens that indicate the start of a bot response
+     # print(input_ids)
+     # print("check7")
+     # print(input_ids.dtype)
+
+     # generate_kwargs = dict(
+     #     tokens= input_ids) #,
+     #     streamer=streamer,
+     #     do_sample=True,
+     #     eos_token_id=terminators,
+     # )
+
+     # outputs = model.generate(
+     # )
+     # print(outputs)
      # calling the model to generate response based on message/ input
      # do_sample if set to True uses strategies to select the next token from the probability distribution over the entire vocabulary
      # temperature controls randomness. more randomness with higher temperature
      # only the tokens comprising the top_p probability mass are considered for responses
      # This output is a data structure containing all the information returned by generate(), but that can also be used as tuple or dictionary.
+     #
+
+     # print("check10")
+     # t = Thread(target=model.generate, kwargs=generate_kwargs)
      # to process multiple instances
+     # t.start()
+     # print("check11")
      # start a thread
+     outputs = []
+     input_ids = llm.tokenize(*messages)
+
+     start = time.time()
+     NUM_TOKENS=0
+     print('-'*4+'Start Generation'+'-'*4)
+     for token in model.generate(input_ids):
+         print(model.detokenize(input_ids), end='', flush=True)
+         NUM_TOKENS+=1
+     time_generate = time.time() - start
+     print('\n')
+     print('-'*4+'End Generation'+'-'*4)
+     print(f'Num of generated tokens: {NUM_TOKENS}')
+     print(f'Time for complete generation: {time_generate}s')
+     print(f'Tokens per second: {NUM_TOKENS/time_generate}')
+     print(f'Time per token: {(time_generate/NUM_TOKENS)*1000}ms')
+
+
+     #outputtokens = model.generate(input_ids)
+     print("check9")
+     #print(outputtokens)
+
+     #outputs = model.detokenize(outputtokens, decode = True)
+     #print(outputs)
+     # for token in model.generate(input_ids):
+     #     print(model.detokenize(token))
+     #     outputs.append(model.detokenize(token))
+     #     output = model.detokenize(token)
+     # print(outputs)
+     # yield "".join(outputs)
+     # print("check12")

      pd.options.display.max_colwidth = 800
+     print("check13")
+     # outputstring = ''.join(outputs)
+
+     # global historylog
+     # historynew = {
+     #     "Prompt": prompt,
+     #     "Output": outputstring
+     # }
+     # historylog.append(historynew)
+     # return historylog
+     # print(historylog)


  TITLE = "AI Copilot for Diabetes Patients"


      examples=[["what is Diabetes? "]],
      title=TITLE,
      description=DESCRIPTION,
  )
  # launch chatbot and calls the talk function which in turn calls other functions
+ print("check14")
+ #print(historylog)
+ #memory_panda = pd.DataFrame(historylog)
+ #Logfile = Dataset.from_pandas(memory_panda)
+ #Logfile.push_to_hub("Namitg02/Logfile",token = HF_TOKEN)
  demo.launch()
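Note: as committed, the new generation block inside talk() tokenizes with an object named llm that is never defined, unpacks the messages list of dictionaries into tokenize(), and re-detokenizes the whole prompt (input_ids) on every loop iteration, so nothing useful is streamed back to the Gradio interface. The sketch below shows what the loop appears to be aiming for with the ctransformers model loaded above; treating the prompt as a single flattened string is an assumption, since ctransformers works on plain text rather than chat-message dictionaries.

# Hypothetical replacement for the streaming loop inside talk(); not the committed code.
prompt_text = SYS_PROMPT + "\n" + formatted_prompt  # assumption: flatten system prompt and user content into one string

outputs = []
for piece in model(prompt_text, stream=True, max_new_tokens=300, temperature=0.4, top_p=0.95):
    outputs.append(piece)   # with stream=True, ctransformers yields already-decoded text pieces
    yield "".join(outputs)  # gr.ChatInterface renders the growing partial answer

# Token-level equivalent using the same calls the commit already uses:
#   for token in model.generate(model.tokenize(prompt_text)):
#       outputs.append(model.detokenize([token]))  # decode only the newly generated token
#       yield "".join(outputs)

Either form keeps generation inside talk(), so the ChatInterface can stream partial answers without the TextIteratorStreamer and Thread machinery, which the GGUF model loaded through ctransformers does not use.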