pabloce committed
Commit e3f3865 · verified · 1 Parent(s): 6063102

Update app.py

Files changed (1)
  1. app.py +6 -11
app.py CHANGED
@@ -2,6 +2,12 @@ import spaces
 import json
 import subprocess
 import gradio as gr
+from llama_cpp import Llama
+from llama_cpp_agent import LlamaCppAgent
+from llama_cpp_agent.providers import LlamaCppPythonProvider
+from llama_cpp_agent.chat_history import BasicChatHistory
+from llama_cpp_agent.chat_history.messages import Roles
+from llama_cpp_agent.llm_output_settings import LlmStructuredOutputSettings
 from huggingface_hub import hf_hub_download
 from duckduckgo_search import DDGS
 from trafilatura import fetch_url, extract
@@ -14,11 +20,6 @@ examples = [
     ["filetype:pdf intitle:python"]
 ]
 
-# subprocess.run(
-#     'pip install llama-cpp-python==0.2.75 --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cu124',
-#     shell=True)
-# subprocess.run('pip install llama-cpp-agent==0.2.10', shell=True)
-
 hf_hub_download(
     repo_id="bartowski/Mistral-7B-Instruct-v0.3-GGUF",
     filename="Mistral-7B-Instruct-v0.3-Q6_K.gguf",
@@ -156,12 +157,6 @@ def respond(
     repeat_penalty,
     model,
 ):
-    from llama_cpp import Llama
-    from llama_cpp_agent import LlamaCppAgent
-    from llama_cpp_agent.providers import LlamaCppPythonProvider
-    from llama_cpp_agent.chat_history import BasicChatHistory
-    from llama_cpp_agent.chat_history.messages import Roles
-    from llama_cpp_agent.llm_output_settings import LlmStructuredOutputSettings
     chat_template = get_messages_formatter_type(model)
     model_selected = model
 
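
Taken together, the hunks hoist the llama-cpp-python / llama-cpp-agent imports out of respond() to module level and drop the commented-out runtime pip installs. Below is a minimal sketch of how these now module-level imports are typically wired to the GGUF file fetched by hf_hub_download; the local model path, context size, system prompt, and prompt text are assumptions for illustration, not values taken from this Space's app.py.

# Sketch only: model_path, n_ctx, system_prompt, and the example prompt are assumed values.
from llama_cpp import Llama
from llama_cpp_agent import LlamaCppAgent
from llama_cpp_agent.providers import LlamaCppPythonProvider

# Load the downloaded GGUF model with llama-cpp-python.
llm = Llama(
    model_path="models/Mistral-7B-Instruct-v0.3-Q6_K.gguf",  # assumed download location
    n_gpu_layers=-1,  # offload all layers to the GPU when one is available
    n_ctx=8192,
)

# Wrap the model in a provider and hand it to an agent.
provider = LlamaCppPythonProvider(llm)
agent = LlamaCppAgent(
    provider,
    system_prompt="You are a helpful assistant.",
)

# Single-turn request through the agent.
answer = agent.get_chat_response("Hello!")
print(answer)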