nmarafo committed on
Commit 3edafbd (verified)
1 Parent(s): 9878261

Update app.py

Files changed (1)
  app.py +52 -63
app.py CHANGED
@@ -1,64 +1,53 @@
  import gradio as gr
- from huggingface_hub import InferenceClient
-
- """
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
- """
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
- def respond(
-     message,
-     history: list[tuple[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
- ):
-     messages = [{"role": "system", "content": system_message}]
-
-     for val in history:
-         if val[0]:
-             messages.append({"role": "user", "content": val[0]})
-         if val[1]:
-             messages.append({"role": "assistant", "content": val[1]})
-
-     messages.append({"role": "user", "content": message})
-
-     response = ""
-
-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         token = message.choices[0].delta.content
-
-         response += token
-         yield response
-
-
- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
- demo = gr.ChatInterface(
-     respond,
-     additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
-     ],
- )
-
-
- if __name__ == "__main__":
-     demo.launch()
+ import subprocess
+
+ # Path to the llama.cpp executable
+ LLAMA_CPP_EXECUTABLE = "/llama.cpp/main"
+
+ # Path to the downloaded GGUF model
+ MODEL_PATH = "/app/shieldgemma-9b.Q4_K_M.gguf"
+
+ # Run llama.cpp in a subprocess and return the generated text
+ def llama_cpp_generate(prompt):
+     command = [
+         LLAMA_CPP_EXECUTABLE,
+         "--model", MODEL_PATH,
+         "--prompt", prompt,
+         "--temp", "0.7",
+         "--top_p", "0.95",
+         "--repeat_penalty", "1.1",
+         "--n_predict", "256"
+     ]
+
+     result = subprocess.run(command, capture_output=True, text=True)
+     return result.stdout.strip()
+
+ def respond(message):
+     assistant_prompt = f"You are a friendly assistant.\nUser: {message}\nAssistant:"
+     response = llama_cpp_generate(assistant_prompt)
+     return response
+
+ with gr.Blocks() as demo:
+     gr.Markdown("# GGUF Chatbot (Experimental)")
+     chatbot = gr.Chatbot()
+     message = gr.Textbox(label="Your message")
+     submit_button = gr.Button("Send")
+
+     def submit_message(user_message, chat_history):
+         chat_history = chat_history + [[user_message, None]]
+         assistant_reply = respond(user_message)
+         chat_history[-1][1] = assistant_reply
+         return "", chat_history
+
+     submit_button.click(
+         submit_message,
+         inputs=[message, chatbot],
+         outputs=[message, chatbot],
+     )
+     message.submit(
+         submit_message,
+         inputs=[message, chatbot],
+         outputs=[message, chatbot],
+     )
+
+ demo.launch(debug=True)
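
One caveat with the new `llama_cpp_generate`: `subprocess.run` without a returncode check silently swallows failures, and llama.cpp's `main` binary normally echoes the prompt at the start of stdout, so `result.stdout.strip()` can hand the user's own prompt back to the Chatbot widget. A minimal hardened sketch, reusing the `LLAMA_CPP_EXECUTABLE` and `MODEL_PATH` constants from this commit (the `llama_cpp_generate_checked` name and the timeout value are illustrative, not part of the diff):

import subprocess

def llama_cpp_generate_checked(prompt):
    # Same invocation as llama_cpp_generate above, plus error and echo handling.
    command = [
        LLAMA_CPP_EXECUTABLE,
        "--model", MODEL_PATH,
        "--prompt", prompt,
        "--temp", "0.7",
        "--top_p", "0.95",
        "--repeat_penalty", "1.1",
        "--n_predict", "256",
    ]
    # Fail loudly on a non-zero exit instead of returning an empty reply;
    # the 300-second timeout is an illustrative guess, not a tested value.
    result = subprocess.run(command, capture_output=True, text=True, timeout=300)
    if result.returncode != 0:
        raise RuntimeError(f"llama.cpp exited with {result.returncode}: {result.stderr.strip()}")
    output = result.stdout.strip()
    # main typically prints the prompt before the completion; drop the echo if present.
    echoed = prompt.strip()
    if output.startswith(echoed):
        output = output[len(echoed):].lstrip()
    return output

For debugging outside Gradio, the same invocation can be reproduced from a shell: /llama.cpp/main --model /app/shieldgemma-9b.Q4_K_M.gguf --prompt "Hello" --temp 0.7 --top_p 0.95 --repeat_penalty 1.1 --n_predict 256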