sithumonline committed · Commit 61fa1d3 · verified · 1 Parent(s): 2028639
Files changed (1):
  1. app.py +21 -20
app.py CHANGED
@@ -1,8 +1,6 @@
 import os
 import gradio as gr
 import copy
-import time
-import llama_cpp
 from llama_cpp import Llama
 from huggingface_hub import hf_hub_download
 
@@ -14,18 +12,17 @@ llm = Llama(
     ),
     n_ctx=2048,
     n_gpu_layers=50,  # change n_gpu_layers if you have more or less VRAM
-)
-
-history = []
-
-system_message = """
-You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
-
-If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.
-"""
+)
 
 
-def generate_text(message, history):
+def generate_text(
+    message,
+    history: list[tuple[str, str]],
+    system_message,
+    max_tokens,
+    temperature,
+    top_p,
+):
     temp = ""
     input_prompt = f"[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n "
     for interaction in history:
@@ -35,11 +32,11 @@ def generate_text(message, history):
 
     output = llm(
         input_prompt,
-        temperature=0.15,
-        top_p=0.1,
-        top_k=40,
+        temperature=temperature,
+        top_p=top_p,
+        top_k=40,
         repeat_penalty=1.1,
-        max_tokens=1024,
+        max_tokens=max_tokens,
         stop=[
             "<|prompter|>",
             "<|endoftext|>",
@@ -55,15 +52,19 @@ def generate_text(message, history):
         temp += stream["choices"][0]["text"]
         yield temp
 
-    history = ["init", input_prompt]
-
 
 demo = gr.ChatInterface(
     generate_text,
     title="llama-cpp-python on GPU",
     description="Running LLM with https://github.com/abetlen/llama-cpp-python",
-    examples=["tell me everything about llamas"],
-    cache_examples=True,
+    examples=[
+        ['How to setup a human base on Mars? Give short answer.'],
+        ['Explain theory of relativity to me like I’m 8 years old.'],
+        ['What is 9,000 * 9,000?'],
+        ['Write a pun-filled happy birthday message to my friend Alex.'],
+        ['Justify why a penguin might make a good king of the jungle.']
+    ],
+    cache_examples=False,
     retry_btn=None,
     undo_btn="Delete Previous",
     clear_btn="Clear",
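
The new generate_text signature only takes effect if gr.ChatInterface actually supplies the four extra arguments, and the hunks above do not show that wiring. Below is a minimal sketch of the usual approach, using Gradio's additional_inputs; the component choices and their default values are assumptions, picked to mirror the constants this commit deletes (the module-level system_message, temperature=0.15, top_p=0.1, max_tokens=1024):

    # Hypothetical wiring, not part of this commit. gr.ChatInterface passes the
    # current value of each component in additional_inputs to the chat function
    # as an extra positional argument, in order, after (message, history).
    import gradio as gr

    demo = gr.ChatInterface(
        generate_text,
        additional_inputs=[
            gr.Textbox(
                value="You are a helpful, respectful and honest assistant.",
                label="System message",
            ),
            gr.Slider(minimum=1, maximum=2048, value=1024, step=1, label="max_tokens"),
            gr.Slider(minimum=0.0, maximum=2.0, value=0.15, step=0.05, label="temperature"),
            gr.Slider(minimum=0.05, maximum=1.0, value=0.1, step=0.05, label="top_p"),
        ],
    )

The switch from cache_examples=True to False fits the same change: cached examples are generated once at startup with fixed defaults, while uncached examples run live and therefore respect whatever the user has set in the additional inputs.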