Namitg02 committed
Commit 1db302e · verified · 1 Parent(s): 690f0a3

Update app.py

Files changed (1)
app.py +37 -22
app.py CHANGED
@@ -5,6 +5,7 @@ import faiss
 import time
 #import torch
 import pandas as pd
+from llama_cpp import Llama
 
 from transformers import AutoTokenizer, GenerationConfig #, AutoModelForCausalLM
 #from transformers import AutoModelForCausalLM, AutoModel
@@ -64,8 +65,18 @@ generation_config = AutoConfig.from_pretrained(
 )
 # send additional parameters to model for generation
 
-model = AutoModelForCausalLM.from_pretrained(llm_model, model_file = "tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf", model_type="llama", gpu_layers=0, config = generation_config)
-
+#model = AutoModelForCausalLM.from_pretrained(llm_model, model_file = "tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf", model_type="llama", gpu_layers=0, config = generation_config)
+model = Llama(
+    model_path="./tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf",
+    chat_format="llama-2",
+    n_gpu_layers=0,
+    temperature=0.75,
+    max_tokens=500,
+    top_p=0.95,
+    # callback_manager=callback_manager,
+    # verbose=True, # Verbose is required to pass to the callback manager
+)
+
 def search(query: str, k: int = 2 ):
     """a function that embeds a new query and returns the most probable results"""
     embedded_query = embedding_model.encode(query) # create embedding of a new query
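
A note on the new constructor call: in llama-cpp-python, sampling settings such as temperature, max_tokens, and top_p are normally passed to the completion calls rather than to Llama() itself, whose keyword arguments cover model and runtime settings. A minimal sketch of the load step under that assumption (the n_ctx value here is illustrative, not taken from the commit):

    from llama_cpp import Llama

    model = Llama(
        model_path="./tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf",
        chat_format="llama-2",  # chat template applied by create_chat_completion
        n_gpu_layers=0,         # 0 = CPU-only inference
        n_ctx=2048,             # context window; illustrative value
        verbose=False,
    )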
@@ -117,11 +128,13 @@ def talk(prompt, history):
     ]
     # indicates the end of a sequence
 
-    input_ids = tokenizer.apply_chat_template(
-        messages,
-        add_generation_prompt=True,
-        return_tensors="pt"
-    )
+    model_input = model.create_chat_completion(messages = messages)
+
+    # input_ids = tokenizer.apply_chat_template(
+    #     messages,
+    #     add_generation_prompt=True,
+    #     return_tensors="pt"
+    # )
     # preparing tokens for model input
     # add_generation_prompt argument tells the template to add tokens that indicate the start of a bot response
     # print(input_ids)
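
For context on the new call: create_chat_completion runs generation itself and returns an OpenAI-style response dict (or an iterator of chunks when stream=True), so the name model_input is a little misleading. A hedged sketch of both usages, assuming llama-cpp-python's documented return shape:

    response = model.create_chat_completion(
        messages=messages,
        temperature=0.75,
        top_p=0.95,
        max_tokens=500,
    )
    reply = response["choices"][0]["message"]["content"]

    # streaming variant: chunks carry incremental "delta" fields
    for chunk in model.create_chat_completion(messages=messages, max_tokens=500, stream=True):
        delta = chunk["choices"][0]["delta"]
        if "content" in delta:
            print(delta["content"], end="", flush=True)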
@@ -152,24 +165,26 @@ def talk(prompt, history):
     # print("check11")
     # start a thread
     outputs = []
-    print(messages)
-    print(*messages)
+    print(model_input)
     print(model.tokenize(messages))
+    tokens = model.tokenize(messages)
+    for token in model.generate(tokens):
+        print(model.detokenize([token]))
     # input_ids = tokenizer(*messages)
     # print(model.generate(tensor([[ 1, 529, 29989, 5205, 29989]])))
-    start = time.time()
-    NUM_TOKENS=0
-    print('-'*4+'Start Generation'+'-'*4)
-    for token in model.generate(input_ids):
-        print(model.detokenize(input_ids), end='', flush=True)
-        NUM_TOKENS+=1
-    time_generate = time.time() - start
-    print('\n')
-    print('-'*4+'End Generation'+'-'*4)
-    print(f'Num of generated tokens: {NUM_TOKENS}')
-    print(f'Time for complete generation: {time_generate}s')
-    print(f'Tokens per second: {NUM_TOKENS/time_generate}')
-    print(f'Time per token: {(time_generate/NUM_TOKENS)*1000}ms')
+    # start = time.time()
+    # NUM_TOKENS=0
+    # print('-'*4+'Start Generation'+'-'*4)
+    # for token in model.generate(input_ids):
+    #     print(model.detokenize(input_ids), end='', flush=True)
+    #     NUM_TOKENS+=1
+    # time_generate = time.time() - start
+    # print('\n')
+    # print('-'*4+'End Generation'+'-'*4)
+    # print(f'Num of generated tokens: {NUM_TOKENS}')
+    # print(f'Time for complete generation: {time_generate}s')
+    # print(f'Tokens per second: {NUM_TOKENS/time_generate}')
+    # print(f'Time per token: {(time_generate/NUM_TOKENS)*1000}ms')
 
 
     #outputtokens = model.generate(input_ids)
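
One caveat on the new loop: Llama.tokenize() takes bytes, not a list of chat-message dicts, so model.tokenize(messages) as committed would fail; the messages need to be flattened into a prompt string first. A sketch of a working token-by-token loop, assuming llama-cpp-python's low-level API (the flattening scheme and the 500-token cap are illustrative, not from the commit):

    import time

    # naive flattening of the chat messages into a single prompt string (illustration only)
    prompt = "\n".join(f"{m['role']}: {m['content']}" for m in messages)
    tokens = model.tokenize(prompt.encode("utf-8"))

    start = time.time()
    num_tokens = 0
    for token in model.generate(tokens, temp=0.75, top_p=0.95):
        if token == model.token_eos() or num_tokens >= 500:
            break
        # detokenize() takes a list of token ids and returns bytes
        print(model.detokenize([token]).decode("utf-8", errors="ignore"), end="", flush=True)
        num_tokens += 1
    elapsed = time.time() - start
    print(f"\n{num_tokens} tokens in {elapsed:.2f}s ({num_tokens / elapsed:.1f} tokens/s)")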
 