Tijmen2 committed on
Commit
39101eb
·
verified ·
1 Parent(s): b5aa0f1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -11
app.py CHANGED
@@ -1,11 +1,24 @@
1
-
2
  import gradio as gr
3
  from huggingface_hub import InferenceClient
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
 
5
- """
6
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
7
- """
8
- client = InferenceClient("https://huggingface.co/AstroMLab/AstroSage-8B-GGUF/blob/main/AstroSage-8B-Q8_0.gguf")
9
 
10
 
11
  def respond(
@@ -41,13 +54,14 @@ def respond(
41
  yield response
42
 
43
 
44
- """
45
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
46
- """
47
  demo = gr.ChatInterface(
48
  respond,
49
  additional_inputs=[
50
- gr.Textbox(value="Assume the role of AstroSage, a helpful chatbot designed to answer user queries about astronomy, astrophysics, and cosmology.", label="System message"),
 
 
 
51
  gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
52
  gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
53
  gr.Slider(
@@ -60,7 +74,5 @@ demo = gr.ChatInterface(
60
  ],
61
  )
62
 
63
-
64
  if __name__ == "__main__":
65
  demo.launch()
66
-
 
import os

import requests
import gradio as gr
from huggingface_hub import InferenceClient

# Local cache path and download URL for the GGUF model weights.
# NOTE(review): "Q8_0" is the conventional GGUF quantization suffix and is
# the filename the previous revision of this app pointed at; the "Q8.0"
# spelling produced a resolve URL that does not match the repo file.
# Confirm against the AstroMLab/AstroSage-8B-GGUF file listing.
model_filename = "AstroSage-8B-Q8_0.gguf"
model_url = (
    "https://huggingface.co/AstroMLab/AstroSage-8B-GGUF/resolve/main/"
    + model_filename
)

# Download the weights once; later launches reuse the local copy.
if not os.path.exists(model_filename):
    print(f"{model_filename} not found. Downloading...")
    response = requests.get(model_url, stream=True, timeout=60)
    response.raise_for_status()  # fail fast on HTTP errors (403/404/5xx)
    # Stream into a temporary name and atomically rename on success, so an
    # interrupted download never leaves a truncated file that the existence
    # check above would wrongly accept on the next launch.
    partial_filename = model_filename + ".part"
    with open(partial_filename, "wb") as f:
        for chunk in response.iter_content(chunk_size=8192):
            f.write(chunk)
    os.replace(partial_filename, model_filename)
    print(f"Downloaded {model_filename} successfully.")

# NOTE(review): InferenceClient normally takes a Hub model id or an endpoint
# URL; passing a local .gguf path (as the original does) presumably relies on
# a specific serving setup — verify this actually reaches a running backend.
client = InferenceClient(model_filename)
 
 
22
 
23
 
24
  def respond(
 
54
  yield response
55
 
56
 
57
+ # Gradio Chat Interface
 
 
58
  demo = gr.ChatInterface(
59
  respond,
60
  additional_inputs=[
61
+ gr.Textbox(
62
+ value="Assume the role of AstroSage, a helpful chatbot designed to answer user queries about astronomy, astrophysics, and cosmology.",
63
+ label="System message",
64
+ ),
65
  gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
66
  gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
67
  gr.Slider(
 
74
  ],
75
  )
76
 
 
77
# Launch the Gradio UI only when this file is executed as a script,
# not when it is imported as a module.
if __name__ == "__main__":
    demo.launch()