Filip committed on
Commit ab546a4 · 1 Parent(s): b03e00d
Files changed (2)
  1. .gitignore +2 -1
  2. app.py +99 -68
.gitignore CHANGED
@@ -1 +1,2 @@
-venv
+venv
+.gradio
app.py CHANGED
@@ -1,9 +1,8 @@
 import gradio as gr
 from llama_cpp import Llama
 from huggingface_hub import hf_hub_download
-
-# Global variable to track model loading status
-model = None
+import json
+import re
 
 def load_model():
     repo_id = "forestav/gguf_lora_model"
@@ -24,26 +23,29 @@ def load_model():
 
     return model
 
-def generate_career_response(message, history):
-    if model is None:
-        return "Model is still loading. Please wait..."
-
-    enhanced_prompt = f"""As a career development advisor, help the user with their professional growth.
-    Consider:
-    1. Skill development opportunities
-    2. Industry trends
-    3. Practical next steps
-    4. Resources and learning paths
-
-    User Query: {message}
-
-    Provide a structured response with actionable advice."""
-
+# Enhanced generation with multiple modes
+def generate_response(message, history, mode='chat'):
+    # Preprocessing based on mode
+    if mode == 'code':
+        system_prompt = "You are an expert coding assistant. Provide clean, efficient code solutions."
+    elif mode == 'creative':
+        system_prompt = "You are a creative writing assistant. Generate imaginative and engaging content."
+    elif mode == 'analytical':
+        system_prompt = "You are an analytical assistant. Provide deep, structured insights and reasoning."
+    else:
+        system_prompt = "You are a helpful AI assistant."
+
+    # Prepare messages with system context
+    messages = [
+        {"role": "system", "content": system_prompt},
+        *[{"role": "user" if i % 2 == 0 else "assistant", "content": msg}
+          for i, msg in enumerate(sum(history, []))],
+        {"role": "user", "content": message}
+    ]
+
+    # Generate response
     response = model.create_chat_completion(
-        messages=[
-            {"role": "system", "content": "You are a professional career advisor focused on providing practical, actionable guidance for career development."},
-            {"role": "user", "content": enhanced_prompt}
-        ],
+        messages=messages,
         max_tokens=512,
         temperature=0.7,
         top_p=0.95,
@@ -51,53 +53,82 @@ def generate_career_response(message, history):
 
     return response['choices'][0]['message']['content']
 
-# Create the interface first
-demo = gr.ChatInterface(
-    fn=generate_career_response,
-    title="Career Growth Navigator 🚀",
-    description="""Your AI career development partner. Ask about:
-    • Skill development paths
-    • Career transition strategies
-    • Industry trends and opportunities
-    • Resume and interview preparation
-    • Professional networking advice
-    • Work-life balance
-    Let's shape your professional future together!""",
-    examples=[
-        "I'm a software developer wanting to transition into AI/ML. What skills should I focus on?",
-        "How can I improve my leadership skills in my current role?",
-        "What are the key trends in digital marketing I should be aware of?",
-        "I want to start freelancing in web development. Where should I begin?",
-        "How can I negotiate a promotion in my current position?"
-    ]
-)
-
-# Create loading interface
-with gr.Blocks() as loading_demo:
-    gr.Markdown("# Loading Career Growth Navigator 🚀")
-    with gr.Row():
-        loading_msg = gr.Markdown("⏳ The model is currently loading. Please wait...")
+# Extract structured data from text
+def extract_structured_data(text):
+    try:
+        # Try to extract JSON-like structures
+        json_match = re.search(r'\{.*\}', text, re.DOTALL)
+        if json_match:
+            try:
+                return json.loads(json_match.group(0))
+            except json.JSONDecodeError:
+                pass
+
+        # Fall back to custom parsing for key-value pairs
+        data = {}
+        for line in text.split('\n'):
+            if ':' in line:
+                key, value = line.split(':', 1)
+                data[key.strip()] = value.strip()
+
+        return data
+    except Exception as e:
+        return {"error": str(e)}
 
-if __name__ == "__main__":
-    # Start with loading interface
-    loading_demo.queue()
-    loading_demo.launch(
-        server_name="0.0.0.0",
-        server_port=7860,
-        share=False,
-        prevent_thread_lock=True
-    )
-
-    # Load the model
-    print("Starting model loading...")
-    model = load_model()
-    print("Model loaded successfully!")
+# Create Gradio interface with multiple tabs
+def create_interface():
+    with gr.Blocks() as demo:
+        gr.Markdown("# Multi-Mode AI Assistant")
+
+        with gr.Tabs():
+            # Chat Interface
+            with gr.TabItem("Conversational Chat"):
+                chat_interface = gr.ChatInterface(
+                    fn=lambda message, history: generate_response(message, history, 'chat'),
+                    title="Conversational AI",
+                    description="General-purpose conversation mode"
+                )
+
+            # Code Generation Tab
+            with gr.TabItem("Code Assistant"):
+                code_interface = gr.ChatInterface(
+                    fn=lambda message, history: generate_response(message, history, 'code'),
+                    title="AI Code Generator",
+                    description="Generate code snippets and solve programming challenges"
+                )
+
+            # Creative Writing Tab
+            with gr.TabItem("Creative Writing"):
+                creative_interface = gr.ChatInterface(
+                    fn=lambda message, history: generate_response(message, history, 'creative'),
+                    title="Creative Writing Assistant",
+                    description="Generate stories, poems, and creative content"
+                )
+
+            # Data Extraction Tab
+            with gr.TabItem("Data Extractor"):
+                with gr.Row():
+                    text_input = gr.Textbox(label="Input Text")
+                    extract_btn = gr.Button("Extract Structured Data")
+                json_output = gr.JSON(label="Extracted Data")
+
+                extract_btn.click(
+                    fn=extract_structured_data,
+                    inputs=text_input,
+                    outputs=json_output
+                )
 
-    # Close loading interface and launch main interface
-    loading_demo.close()
-    demo.queue()
-    demo.launch(
-        server_name="0.0.0.0",
-        server_port=7860,
-        share=False
-    )
+    return demo
+
+# Load model globally
+print("Starting model loading...")
+model = load_model()
+print("Model loaded successfully!")
+
+# Create and launch the interface
+demo = create_interface()
+demo.launch(
+    server_name="0.0.0.0",  # Necessary for Spaces
+    server_port=7860,       # Standard port for Spaces
+    share=True              # Don't need share link in Spaces
)
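
A minimal sketch (not part of the commit) of what the message assembly in the new generate_response produces. It assumes gr.ChatInterface hands the function pair-style history (lists of [user, assistant] turns), which is what the sum(history, []) flattening relies on; the sample history and message below are made up.

# Sketch: reproduces the message-building logic from generate_response
# with a made-up pair-style history. Not part of the committed app.py.
sample_history = [
    ["What is a GGUF file?", "GGUF is a binary format for storing LLM weights."],
]
message = "How do I load one with llama-cpp-python?"
system_prompt = "You are a helpful AI assistant."

messages = [
    {"role": "system", "content": system_prompt},
    # Flatten the pairs; even indices become user turns, odd indices assistant turns.
    *[{"role": "user" if i % 2 == 0 else "assistant", "content": msg}
      for i, msg in enumerate(sum(sample_history, []))],
    {"role": "user", "content": message},
]

for m in messages:
    print(m["role"], "->", m["content"])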
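
The new extract_structured_data helper has two paths: pull an embedded JSON object out of the text with a regex, otherwise collect "key: value" lines into a dict. A small self-contained sketch of both paths with made-up inputs (not part of the commit):

import json
import re

# Path 1: an embedded JSON object is found by the regex and parsed.
text_with_json = 'Profile summary: {"name": "Ada", "role": "engineer"}'
match = re.search(r'\{.*\}', text_with_json, re.DOTALL)
print(json.loads(match.group(0)))   # {'name': 'Ada', 'role': 'engineer'}

# Path 2: no JSON object, so "key: value" lines are collected instead.
plain_text = "name: Ada\nrole: engineer"
data = {}
for line in plain_text.split('\n'):
    if ':' in line:
        key, value = line.split(':', 1)
        data[key.strip()] = value.strip()
print(data)                          # {'name': 'Ada', 'role': 'engineer'}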
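
Each tab wires gr.ChatInterface to generate_response through a lambda that only fixes the mode argument; functools.partial is an equivalent way to express the same wiring. A sketch with a stub standing in for the real generate_response from the diff above:

from functools import partial

# Stub standing in for the generate_response defined in app.py.
def generate_response(message, history, mode="chat"):
    return f"[{mode}] reply to: {message}"

# Same call shape the 'code' tab's lambda produces: fn(message, history).
code_fn = partial(generate_response, mode="code")
print(code_fn("Write a quicksort in Python", []))   # [code] reply to: Write a quicksort in Python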