suraj committed
Commit ff0a367
1 Parent(s): 48975c6
Files changed (5)
  1. .gitattributes +1 -0
  2. .gitignore +2 -0
  3. __init__.py +0 -65
  4. app.py +130 -404
  5. requirements.txt +1 -1
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+
.gitignore ADDED
@@ -0,0 +1,2 @@
+ env/*
+ env/pyvenv.cfg
__init__.py DELETED
@@ -1,65 +0,0 @@
- import os
- from langchain.document_loaders import (
-     CSVLoader,
-     EverNoteLoader,
-     PDFMinerLoader,
-     TextLoader,
-     UnstructuredEPubLoader,
-     UnstructuredHTMLLoader,
-     UnstructuredMarkdownLoader,
-     UnstructuredODTLoader,
-     UnstructuredPowerPointLoader,
-     UnstructuredWordDocumentLoader,
- )
-
-
- FAVICON_PATH: str = 'https://modishcard.com/app/assets/icons/ModishCard_Logo6-02.svg'
- SYSTEM_PROMPT: str = "You are Saiga, a Englis-speaking automated assistant. You talk to people and help them."
- SYSTEM_TOKEN: int = 1788
- USER_TOKEN: int = 1404
- BOT_TOKEN: int = 9225
- LINEBREAK_TOKEN: int = 13
-
- ROLE_TOKENS: dict = {
-     "user": USER_TOKEN,
-     "bot": BOT_TOKEN,
-     "system": SYSTEM_TOKEN
- }
-
- LOADER_MAPPING: dict = {
-     ".csv": (CSVLoader, {}),
-     ".doc": (UnstructuredWordDocumentLoader, {}),
-     ".docx": (UnstructuredWordDocumentLoader, {}),
-     ".enex": (EverNoteLoader, {}),
-     ".epub": (UnstructuredEPubLoader, {}),
-     ".html": (UnstructuredHTMLLoader, {}),
-     ".md": (UnstructuredMarkdownLoader, {}),
-     ".odt": (UnstructuredODTLoader, {}),
-     ".pdf": (PDFMinerLoader, {}),
-     ".ppt": (UnstructuredPowerPointLoader, {}),
-     ".pptx": (UnstructuredPowerPointLoader, {}),
-     ".txt": (TextLoader, {"encoding": "utf8"}),
- }
-
-
- DICT_REPO_AND_MODELS: dict = {
-     "https://huggingface.co/MaziyarPanahi/Qwen2-1.5B-Instruct-GGUF/resolve/main/Qwen2-1.5B-Instruct.Q8_0.gguf":
-         "MaziyarPanahi/Qwen2-1.5B-Instruct.Q8_0.gguf",
- }
-
- EMBEDDER_NAME: str = "sentence-transformers/paraphrase-multilingual-mpnet-base-v2"
-
- MAX_NEW_TOKENS: int = 1500
-
- ABS_PATH = os.path.dirname(os.path.abspath(__file__))
- MODELS_DIR = os.path.join(ABS_PATH, "../models")
- AUTH_FILE = os.path.join(ABS_PATH, "auth.csv")
-
-
- BLOCK_CSS = """
-
- #buttons button {
-     min-width: min(120px,100%);
- }
-
- """
app.py CHANGED
@@ -1,412 +1,138 @@
- import tempfile
- import itertools
  import gradio as gr
- from __init__ import *
  from llama_cpp import Llama
- from chromadb.config import Settings
- from typing import List, Optional, Union
- from langchain.vectorstores import Chroma
- from langchain.docstore.document import Document
- from huggingface_hub.file_download import http_get
- from langchain.embeddings import HuggingFaceEmbeddings
- from langchain.text_splitter import RecursiveCharacterTextSplitter
-
-
- class LocalChatGPT:
-     def __init__(self):
-         self.llama_model: Optional[Llama] = None
-         self.embeddings: HuggingFaceEmbeddings = self.initialize_app()
-
-     def initialize_app(self) -> HuggingFaceEmbeddings:
-         """
-         Load all models from the list
-         :return:
-         """
-         os.makedirs(MODELS_DIR, exist_ok=True)
-         model_url, model_name = list(DICT_REPO_AND_MODELS.items())[0]
-         final_model_path = os.path.join(MODELS_DIR, model_name)
-         os.makedirs("/".join(final_model_path.split("/")[:-1]), exist_ok=True)
-
-         if not os.path.exists(final_model_path):
-             with open(final_model_path, "wb") as f:
-                 http_get(model_url, f)
-
-         self.llama_model = Llama(
-             model_path=final_model_path,
-             n_ctx=2000,
-             n_parts=1,
-         )
-
-         return HuggingFaceEmbeddings(model_name=EMBEDDER_NAME, cache_folder=MODELS_DIR)
-
-     def load_model(self, model_name):
-         """
-
-         :param model_name:
-         :return:
-         """
-         final_model_path = os.path.join(MODELS_DIR, model_name)
-         os.makedirs("/".join(final_model_path.split("/")[:-1]), exist_ok=True)
-
-         if not os.path.exists(final_model_path):
-             with open(final_model_path, "wb") as f:
-                 if model_url := [i for i in DICT_REPO_AND_MODELS if DICT_REPO_AND_MODELS[i] == model_name]:
-                     http_get(model_url[0], f)
-
-         self.llama_model = Llama(
-             model_path=final_model_path,
-             n_ctx=2000,
-             n_parts=1,
-         )
-         return model_name
-
-     @staticmethod
-     def load_single_document(file_path: str) -> Document:
-         """
-         Upload one document.
-         :param file_path:
-         :return:
-         """
-         ext: str = "." + file_path.rsplit(".", 1)[-1]
-         assert ext in LOADER_MAPPING
-         loader_class, loader_args = LOADER_MAPPING[ext]
-         loader = loader_class(file_path, **loader_args)
-         return loader.load()[0]
-
-     @staticmethod
-     def get_message_tokens(model: Llama, role: str, content: str) -> list:
-         """
-
-         :param model:
-         :param role:
-         :param content:
-         :return:
-         """
-         message_tokens: list = model.tokenize(content.encode("utf-8"))
-         message_tokens.insert(1, ROLE_TOKENS[role])
-         message_tokens.insert(2, LINEBREAK_TOKEN)
-         message_tokens.append(model.token_eos())
-         return message_tokens
-
-     def get_system_tokens(self, model: Llama) -> list:
-         """
-
-         :param model:
-         :return:
-         """
-         system_message: dict = {"role": "system", "content": SYSTEM_PROMPT}
-         return self.get_message_tokens(model, **system_message)
-
-     @staticmethod
-     def upload_files(files: List[tempfile.TemporaryFile]) -> List[str]:
-         """
-
-         :param files:
-         :return:
-         """
-         return [f.name for f in files]
-
-     @staticmethod
-     def process_text(text: str) -> Optional[str]:
-         """
-
-         :param text:
-         :return:
-         """
-         lines: list = text.split("\n")
-         lines = [line for line in lines if len(line.strip()) > 2]
-         text = "\n".join(lines).strip()
-         return None if len(text) < 10 else text
-
-     @staticmethod
-     def update_text_db(
-         db: Optional[Chroma],
-         fixed_documents: List[Document],
-         ids: List[str]
-     ) -> Union[Optional[Chroma], str]:
-         if db:
-             data: dict = db.get()
-             files_db = {dict_data['source'].split('/')[-1] for dict_data in data["metadatas"]}
-             files_load = {dict_data.metadata["source"].split('/')[-1] for dict_data in fixed_documents}
-             if files_load == files_db:
-                 # db.delete([item for item in data['ids'] if item not in ids])
-                 # db.update_documents(ids, fixed_documents)
-
-                 db.delete(data['ids'])
-                 db.add_texts(
-                     texts=[doc.page_content for doc in fixed_documents],
-                     metadatas=[doc.metadata for doc in fixed_documents],
-                     ids=ids
-                 )
-                 file_warning = f"Uploaded {len(fixed_documents)} fragments! You can ask questions"
-                 return db, file_warning
-
-     def build_index(
-         self,
-         file_paths: List[str],
-         db: Optional[Chroma],
-         chunk_size: int,
-         chunk_overlap: int
-     ):
-         """
-
-         :param file_paths:
-         :param db:
-         :param chunk_size:
-         :param chunk_overlap:
-         :return:
-         """
-         documents: List[Document] = [self.load_single_document(path) for path in file_paths]
-         text_splitter: RecursiveCharacterTextSplitter = RecursiveCharacterTextSplitter(
-             chunk_size=chunk_size, chunk_overlap=chunk_overlap
-         )
-         documents = text_splitter.split_documents(documents)
-         fixed_documents: List[Document] = []
-         for doc in documents:
-             doc.page_content = self.process_text(doc.page_content)
-             if not doc.page_content:
-                 continue
-             fixed_documents.append(doc)
-
-         ids: List[str] = [
-             f"{path.split('/')[-1].replace('.txt', '')}{i}"
-             for path, i in itertools.product(file_paths, range(1, len(fixed_documents) + 1))
-         ]
-
-         self.update_text_db(db, fixed_documents, ids)
-
-         db = Chroma.from_documents(
-             documents=fixed_documents,
-             embedding=self.embeddings,
-             ids=ids,
-             client_settings=Settings(
-                 anonymized_telemetry=False,
-                 persist_directory="db"
-             )
-         )
-         file_warning = f"Uploaded {len(fixed_documents)} fragments! You can ask questions."
-         return db, file_warning
-
-     @staticmethod
-     def user(message, history):
-         new_history = history + [[message, None]]
-         return "", new_history
-
-     @staticmethod
-     def regenerate_response(history):
-         """
-
-         :param history:
-         :return:
-         """
-         return "", history
-
-     @staticmethod
-     def retrieve(history, db: Optional[Chroma], retrieved_docs):
-         """
-
-         :param history:
-         :param db:
-         :param retrieved_docs:
-         :return:
-         """
-         if db:
-             last_user_message = history[-1][0]
-             try:
-                 docs = db.similarity_search(last_user_message, k=4)
-                 # retriever = db.as_retriever(search_kwargs={"k": k_documents})
-                 # docs = retriever.get_relevant_documents(last_user_message)
-             except RuntimeError:
-                 docs = db.similarity_search(last_user_message, k=1)
-                 # retriever = db.as_retriever(search_kwargs={"k": 1})
-                 # docs = retriever.get_relevant_documents(last_user_message)
-             source_docs = set()
-             for doc in docs:
-                 for content in doc.metadata.values():
-                     source_docs.add(content.split("/")[-1])
-             retrieved_docs = "\n\n".join([doc.page_content for doc in docs])
-             retrieved_docs = f"A document- {''.join(list(source_docs))}.\n\n{retrieved_docs}"
-         return retrieved_docs
-
-     def bot(self, history, retrieved_docs):
-         """
-
-         :param history:
-         :param retrieved_docs:
-         :return:
-         """
-         if not history:
-             return
-         tokens = self.get_system_tokens(self.llama_model)[:]
-         tokens.append(LINEBREAK_TOKEN)
-
-         for user_message, bot_message in history[:-1]:
-             message_tokens = self.get_message_tokens(model=self.llama_model, role="user", content=user_message)
-             tokens.extend(message_tokens)
-
-         last_user_message = history[-1][0]
-         if retrieved_docs:
-             last_user_message = f"Context: {retrieved_docs}\n\nUsing context, answer the question:" \
-                                 f"{last_user_message}"
-         message_tokens = self.get_message_tokens(model=self.llama_model, role="user", content=last_user_message)
-         tokens.extend(message_tokens)
-
-         role_tokens = [self.llama_model.token_bos(), BOT_TOKEN, LINEBREAK_TOKEN]
-         tokens.extend(role_tokens)
-         generator = self.llama_model.generate(
-             tokens,
-             top_k=30,
-             top_p=0.9,
-             temp=0.1
-         )
-
-         partial_text = ""
-         for i, token in enumerate(generator):
-             if token == self.llama_model.token_eos() or (MAX_NEW_TOKENS is not None and i >= MAX_NEW_TOKENS):
-                 break
-             partial_text += self.llama_model.detokenize([token]).decode("utf-8", "ignore")
-             history[-1][1] = partial_text
-             yield history
-
-     def run(self):
-         """
-
-         :return:
-         """
-         with gr.Blocks(theme=gr.themes.Soft(), css=BLOCK_CSS) as demo:
-             db: Optional[Chroma] = gr.State(None)
-             favicon = f'<img src="{FAVICON_PATH}" width="48px" style="display: inline">'
              gr.Markdown(
-                 f"""<h1><center>{favicon} GPT-based text assistant</center></h1>"""
-             )
-
-             with gr.Row(elem_id="model_selector_row"):
-                 models: list = list(DICT_REPO_AND_MODELS.values())
-                 model_selector = gr.Dropdown(
-                     choices=models,
-                     value=models[0] if models else "",
-                     interactive=True,
-                     show_label=False,
-                     container=False,
-                 )
-
-             with gr.Row():
-                 with gr.Column(scale=5):
-                     chatbot = gr.Chatbot(label="Dialogue", height=400)
-                 with gr.Column(min_width=200, scale=4):
-                     retrieved_docs = gr.Textbox(
-                         label="Extracted fragments",
-                         placeholder="Will appear after asking questions",
-                         interactive=False
-                     )
-
-             with gr.Row():
-                 with gr.Column(scale=20):
-                     msg = gr.Textbox(
-                         label="send a message",
-                         show_label=False,
-                         placeholder="send a message",
-                         container=False
-                     )
-                 with gr.Column(scale=3, min_width=100):
-                     submit = gr.Button("📤 Send", variant="primary")
-
-             with gr.Row():
-                 # gr.Button(value="👍 Понравилось")
-                 # gr.Button(value="👎 Не понравилось")
-                 stop = gr.Button(value="⛔ Stop")
-                 regenerate = gr.Button(value="🔄 Repeat")
-                 clear = gr.Button(value="🗑️ Clear")
-
-             # # Upload files
-             # file_output.upload(
-             #     fn=self.upload_files,
-             #     inputs=[file_output],
-             #     outputs=[file_paths],
-             #     queue=True,
-             # ).success(
-             #     fn=self.build_index,
-             #     inputs=[file_paths, db, chunk_size, chunk_overlap],
-             #     outputs=[db, file_warning],
-             #     queue=True
-             # )
-
-             model_selector.change(
-                 fn=self.load_model,
-                 inputs=[model_selector],
-                 outputs=[model_selector]
-             )
-
-             # Pressing Enter
-             submit_event = msg.submit(
-                 fn=self.user,
-                 inputs=[msg, chatbot],
-                 outputs=[msg, chatbot],
-                 queue=False,
-             ).success(
-                 fn=self.retrieve,
-                 inputs=[chatbot, db, retrieved_docs],
-                 outputs=[retrieved_docs],
-                 queue=True,
-             ).success(
-                 fn=self.bot,
-                 inputs=[chatbot, retrieved_docs],
-                 outputs=chatbot,
-                 queue=True,
-             )
-
-             # Pressing the button
-             submit_click_event = submit.click(
-                 fn=self.user,
-                 inputs=[msg, chatbot],
-                 outputs=[msg, chatbot],
-                 queue=False,
-             ).success(
-                 fn=self.retrieve,
-                 inputs=[chatbot, db, retrieved_docs],
-                 outputs=[retrieved_docs],
-                 queue=True,
-             ).success(
-                 fn=self.bot,
-                 inputs=[chatbot, retrieved_docs],
-                 outputs=chatbot,
-                 queue=True,
-             )
-
-             # Stop generation
-             stop.click(
-                 fn=None,
-                 inputs=None,
-                 outputs=None,
-                 cancels=[submit_event, submit_click_event],
-                 queue=False,
-             )
-
-             # Regenerate
-             regenerate.click(
-                 fn=self.regenerate_response,
-                 inputs=[chatbot],
-                 outputs=[msg, chatbot],
-                 queue=False,
-             ).success(
-                 fn=self.retrieve,
-                 inputs=[chatbot, db, retrieved_docs],
-                 outputs=[retrieved_docs],
-                 queue=True,
-             ).success(
-                 fn=self.bot,
-                 inputs=[chatbot, retrieved_docs],
-                 outputs=chatbot,
-                 queue=True,
-             )

-             # Clear history
-             clear.click(lambda: None, None, chatbot, queue=False)

-             demo.queue(max_size=128, default_concurrency_limit=10, api_open=False)
-             demo.launch(server_name="0.0.0.0", max_threads=200)


  if __name__ == "__main__":
-     local_chat_gpt = LocalChatGPT()
-     local_chat_gpt.run()
 
 
  import gradio as gr
+ import os
  from llama_cpp import Llama
+ import datetime
+ from huggingface_hub import hf_hub_download
+
+ #MODEL SETTINGS also for DISPLAY
+ convHistory = ''
+ modelfile = hf_hub_download(
+     repo_id=os.environ.get("REPO_ID", "slasiyal/deepseek-coder-1.3b-instruct.gguf"),
+     filename=os.environ.get("MODEL_FILE", "deepseek-coder-1.3b-instruct.gguf"),
+ )
+ repetitionpenalty = 1.15
+ contextlength=4096
+ logfile = 'logs.txt'
+ print("loading model...")
+ stt = datetime.datetime.now()
+ # Set gpu_layers to the number of layers to offload to GPU. Set to 0 if no GPU acceleration is available on your system.
+ llm = Llama(
+     model_path=modelfile,  # Download the model file first
+     n_ctx=contextlength,  # The max sequence length to use - note that longer sequence lengths require much more resources
+     #n_threads=2,  # The number of CPU threads to use, tailor to your system and the resulting performance
+ )
+ dt = datetime.datetime.now() - stt
+ print(f"Model loaded in {dt}")
+
+ def writehistory(text):
+     with open(logfile, 'a') as f:
+         f.write(text)
+         f.write('\n')
+     f.close()
+
+ """
+ gr.themes.Base()
+ gr.themes.Default()
+ gr.themes.Glass()
+ gr.themes.Monochrome()
+ gr.themes.Soft()
+ """
+ def combine(a, b, c, d,e,f):
+     global convHistory
+     import datetime
+     SYSTEM_PROMPT = f"""{a}
+
+
+     """
+     temperature = c
+     max_new_tokens = d
+     repeat_penalty = f
+     top_p = e
+     prompt = f"<|user|>\n{b}<|endoftext|>\n<|assistant|>"
+     start = datetime.datetime.now()
+     generation = ""
+     delta = ""
+     prompt_tokens = f"Prompt Tokens: {len(llm.tokenize(bytes(prompt,encoding='utf-8')))}"
+     generated_text = ""
+     answer_tokens = ''
+     total_tokens = ''
+     for character in llm(prompt,
+                          max_tokens=max_new_tokens,
+                          stop=["</s>"],
+                          temperature = temperature,
+                          repeat_penalty = repeat_penalty,
+                          top_p = top_p,  # Example stop token - not necessarily correct for this specific model! Please check before using.
+                          echo=False,
+                          stream=True):
+         generation += character["choices"][0]["text"]
+
+         answer_tokens = f"Out Tkns: {len(llm.tokenize(bytes(generation,encoding='utf-8')))}"
+         total_tokens = f"Total Tkns: {len(llm.tokenize(bytes(prompt,encoding='utf-8'))) + len(llm.tokenize(bytes(generation,encoding='utf-8')))}"
+         delta = datetime.datetime.now() - start
+         yield generation, delta, prompt_tokens, answer_tokens, total_tokens
+     timestamp = datetime.datetime.now()
+     logger = f"""time: {timestamp}\n Temp: {temperature} - MaxNewTokens: {max_new_tokens} - RepPenalty: 1.5 \nPROMPT: \n{prompt}\nStableZephyr3B: {generation}\nGenerated in {delta}\nPromptTokens: {prompt_tokens} Output Tokens: {answer_tokens} Total Tokens: {total_tokens}\n\n---\n\n"""
+     writehistory(logger)
+     convHistory = convHistory + prompt + "\n" + generation + "\n"
+     print(convHistory)
+     return generation, delta, prompt_tokens, answer_tokens, total_tokens
+     #return generation, delta
+
+
+ # MAIN GRADIO INTERFACE
+ with gr.Blocks(theme='Medguy/base2') as demo:  #theme=gr.themes.Glass() #theme='remilia/Ghostly'
+     #TITLE SECTION
+     with gr.Row(variant='compact'):
+         with gr.Column(scale=12):
+             gr.HTML("<center>"
+                     + "<h3>Prompt Engineering Playground!</h3>"
+                     + "<h1>🐦 StableLM-Zephyr-3B - 4K context window</h2></center>")
+             gr.Image(value='https://github.com/fabiomatricardi/GradioStudies/raw/main/20231205/logo-banner-StableZephyr.jpg', height=95, show_label = False,
+                      show_download_button = False, container = False)
+     # INTERACTIVE INFOGRAPHIC SECTION
+     with gr.Row():
+         with gr.Column(min_width=80):
+             gentime = gr.Textbox(value="", placeholder="Generation Time:", min_width=50, show_label=False)
+         with gr.Column(min_width=80):
+             prompttokens = gr.Textbox(value="", placeholder="Prompt Tkn:", min_width=50, show_label=False)
+         with gr.Column(min_width=80):
+             outputokens = gr.Textbox(value="", placeholder="Output Tkn:", min_width=50, show_label=False)
+         with gr.Column(min_width=80):
+             totaltokens = gr.Textbox(value="", placeholder="Total Tokens:", min_width=50, show_label=False)
+
+     # PLAYGROUND INTERFACE SECTION
+     with gr.Row():
+         with gr.Column(scale=1):
              gr.Markdown(
+                 f"""
+                 ### Tunning Parameters""")
+             temp = gr.Slider(label="Temperature",minimum=0.0, maximum=1.0, step=0.01, value=0.42)
+             top_p = gr.Slider(label="Top_P",minimum=0.0, maximum=1.0, step=0.01, value=0.8)
+             repPen = gr.Slider(label="Repetition Penalty",minimum=0.0, maximum=4.0, step=0.01, value=1.2)
+             max_len = gr.Slider(label="Maximum output lenght", minimum=10,maximum=(contextlength-500),step=2, value=900)
+             gr.Markdown(
+                 """
+                 Fill the System Prompt and User Prompt
+                 And then click the Button below
+                 """)
+             btn = gr.Button(value="🐦 Generate", variant='primary')
+             gr.Markdown(
+                 f"""
+                 - **Prompt Template**: OpenChat 🐦
+                 - **Repetition Penalty**: {repetitionpenalty}
+                 - **Context Lenght**: {contextlength} tokens
+                 - **LLM Engine**: CTransformers
+                 - **Model**: 🐦 StarlingLM-7b
+                 - **Log File**: {logfile}
+                 """)


+         with gr.Column(scale=4):
+             txt = gr.Textbox(label="System Prompt", value = "", placeholder = "This models does not have any System prompt...",lines=1, interactive = False)
+             txt_2 = gr.Textbox(label="User Prompt", lines=6)
+             txt_3 = gr.Textbox(value="", label="Output", lines = 13, show_copy_button=True)
+             btn.click(combine, inputs=[txt, txt_2,temp,max_len,top_p,repPen], outputs=[txt_3,gentime,prompttokens,outputokens,totaltokens])


  if __name__ == "__main__":
+     demo.launch(inbrowser=True)
 
requirements.txt CHANGED
@@ -1,4 +1,4 @@
- llama-cpp-python==0.2.18
+ llama-cpp-python
  langchain==0.0.331
  huggingface-hub==0.17.3
  chromadb==0.4.18