zac committed on
Commit a0c6b2f · 1 Parent(s): b689e2e

Update app.py

Files changed (1)
  1. app.py +5 -27
app.py CHANGED
@@ -7,38 +7,16 @@ from huggingface_hub import hf_hub_download #load from huggingfaces
 
 
 llm = Llama(model_path=hf_hub_download(repo_id="TheBloke/Vigogne-2-7B-Instruct-GGML", filename="vigogne-2-7b-instruct.ggmlv3.q4_1.bin"), n_ctx=2048)  # download model from HF; n_ctx=2048 for high context length
-chat_history = []
 
-def generate_text(message, history):
+description = " Bro i dont want learn! i just want results "
+def generate_text(input_text):
+    output = llm(f"Q: {input_text} \n A:", max_tokens=521, stop=["Q:", "\n"], echo=True,)
+    return output['choices'][0]['text']
 
-    if len(history) > 0:
-        user_input, bot_response = history[-1]  # Get the latest pair from history
-        chat_history.append([user_input, message])
-    else:
-        chat_history.append([message, ""])  # If history is empty, just add the user input
-
-    input_text = message
-    output = llm(f"Q: {input_text} \n A:", max_tokens=521, stop=["Q:", "\n"], echo=True)
-    response = output['choices'][0]['text']
-
-    # Append the bot response to the chat history
-    chat_history[-1][1] = response
-
-    return response
-
 
 input_text = gr.inputs.Textbox(lines=10, label="Enter your input text")
 output_text = gr.outputs.Textbox(label="Output text")
 
-description = " currently running ggml models with llama.cpp implementation in python [https://github.com/abetlen/llama-cpp-python]"
-
-examples = [
-    ["What is the capital of France? ", "The capital of France is Paris."],
-    ["Who wrote the novel 'Pride and Prejudice'?", "The novel 'Pride and Prejudice' was written by Jane Austen."],
-    ["What is the square root of 64?", "The square root of 64 is 8."]
-]
-
-demo = gr.ChatInterface(random_response).launch()
+demo = gr.Interface(fn=generate_text, inputs=input_text, outputs=output_text, title="Running Llama on CPU is Hard", description=description, examples=examples)
 demo.queue()
 demo.launch()
-
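
Note that the committed code still passes examples=examples to gr.Interface, while this same commit deletes the only visible definition of examples; as written, the app would raise a NameError at startup unless examples is defined in the unshown lines 1-6. Below is a minimal runnable sketch of app.py after this commit with that one gap patched. The imports, the restored examples list (questions only, one value per input), and the comments are assumptions, not part of the commit; the gr.inputs/gr.outputs calls follow the older Gradio API this file already uses.

import gradio as gr
from llama_cpp import Llama
from huggingface_hub import hf_hub_download

# Download the quantized GGML weights from the Hub; n_ctx=2048 widens the context window.
llm = Llama(
    model_path=hf_hub_download(
        repo_id="TheBloke/Vigogne-2-7B-Instruct-GGML",
        filename="vigogne-2-7b-instruct.ggmlv3.q4_1.bin",
    ),
    n_ctx=2048,
)

description = " Bro i dont want learn! i just want results "

def generate_text(input_text):
    # echo=True returns the prompt together with the completion;
    # generation stops at the next "Q:" or newline.
    output = llm(f"Q: {input_text} \n A:", max_tokens=521, stop=["Q:", "\n"], echo=True)
    return output["choices"][0]["text"]

# ASSUMPTION: restored from the list this commit deletes (questions only,
# one value per input); without it, examples=examples below is undefined.
examples = [
    ["What is the capital of France? "],
    ["Who wrote the novel 'Pride and Prejudice'?"],
    ["What is the square root of 64?"],
]

# gr.inputs/gr.outputs follow the older Gradio API already used in this file.
input_text = gr.inputs.Textbox(lines=10, label="Enter your input text")
output_text = gr.outputs.Textbox(label="Output text")

demo = gr.Interface(
    fn=generate_text,
    inputs=input_text,
    outputs=output_text,
    title="Running Llama on CPU is Hard",
    description=description,
    examples=examples,
)
demo.queue()
demo.launch()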