Filip
committed on
Commit
·
b03e00d
1
Parent(s):
ec230fe
fix loading ui
Browse files
app.py
CHANGED
@@ -2,6 +2,9 @@ import gradio as gr
|
|
2 |
from llama_cpp import Llama
|
3 |
from huggingface_hub import hf_hub_download
|
4 |
|
|
|
|
|
|
|
5 |
def load_model():
|
6 |
repo_id = "forestav/gguf_lora_model"
|
7 |
model_file = "unsloth.F16.gguf"
|
@@ -22,7 +25,9 @@ def load_model():
|
|
22 |
return model
|
23 |
|
24 |
def generate_career_response(message, history):
|
25 |
-
|
|
|
|
|
26 |
enhanced_prompt = f"""As a career development advisor, help the user with their professional growth.
|
27 |
Consider:
|
28 |
1. Skill development opportunities
|
@@ -46,12 +51,7 @@ def generate_career_response(message, history):
|
|
46 |
|
47 |
return response['choices'][0]['message']['content']
|
48 |
|
49 |
-
#
|
50 |
-
print("Starting model loading...")
|
51 |
-
model = load_model()
|
52 |
-
print("Model loaded successfully!")
|
53 |
-
|
54 |
-
# Create Gradio interface with career-focused examples
|
55 |
demo = gr.ChatInterface(
|
56 |
fn=generate_career_response,
|
57 |
title="Career Growth Navigator π",
|
@@ -72,9 +72,32 @@ demo = gr.ChatInterface(
|
|
72 |
]
|
73 |
)
|
74 |
|
75 |
-
#
|
76 |
-
|
77 |
-
|
78 |
-
|
79 |
-
|
80 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2 |
from llama_cpp import Llama
|
3 |
from huggingface_hub import hf_hub_download
|
4 |
|
5 |
+
# Module-level handle for the loaded model; stays None until the
# __main__ block finishes loading it, which lets the chat handler
# detect the "still loading" state.
model = None
|
7 |
+
|
8 |
def load_model():
|
9 |
repo_id = "forestav/gguf_lora_model"
|
10 |
model_file = "unsloth.F16.gguf"
|
|
|
25 |
return model
|
26 |
|
27 |
def generate_career_response(message, history):
|
28 |
+
if model is None:
|
29 |
+
return "Model is still loading. Please wait..."
|
30 |
+
|
31 |
enhanced_prompt = f"""As a career development advisor, help the user with their professional growth.
|
32 |
Consider:
|
33 |
1. Skill development opportunities
|
|
|
51 |
|
52 |
return response['choices'][0]['message']['content']
|
53 |
|
54 |
+
# Create the interface first
|
|
|
|
|
|
|
|
|
|
|
55 |
demo = gr.ChatInterface(
|
56 |
fn=generate_career_response,
|
57 |
title="Career Growth Navigator π",
|
|
|
72 |
]
|
73 |
)
|
74 |
|
75 |
+
# Lightweight placeholder UI served while the model is still loading.
with gr.Blocks() as loading_demo:
    gr.Markdown("# Loading Career Growth Navigator π")
    with gr.Row():
        loading_msg = gr.Markdown("β³ The model is currently loading. Please wait...")
|
80 |
+
|
81 |
+
if __name__ == "__main__":
    # Server settings shared by the placeholder and the real interface,
    # kept in one place so the two launch() calls cannot drift apart.
    launch_kwargs = {
        "server_name": "0.0.0.0",
        "server_port": 7860,
        "share": False,
    }

    # Show the loading page first. prevent_thread_lock=True returns
    # control to this script so model loading can proceed while the
    # placeholder is being served.
    loading_demo.queue()
    loading_demo.launch(prevent_thread_lock=True, **launch_kwargs)

    # Load the model (downloading the GGUF file can take a while).
    print("Starting model loading...")
    try:
        model = load_model()
    except Exception as exc:
        # Don't leave users staring at the "loading" page forever:
        # report the failure, tear down the placeholder, and re-raise.
        print(f"Model loading failed: {exc}")
        loading_demo.close()
        raise
    print("Model loaded successfully!")

    # Swap the placeholder for the real chat interface on the same port.
    loading_demo.close()
    demo.queue()
    demo.launch(**launch_kwargs)
|