Filip committed
Commit c5c8f7b · 1 Parent(s): ab546a4
Files changed (1):
  1. app.py +87 -88

app.py CHANGED
@@ -1,8 +1,6 @@
 import gradio as gr
 from llama_cpp import Llama
 from huggingface_hub import hf_hub_download
-import json
-import re
 
 def load_model():
     repo_id = "forestav/gguf_lora_model"
@@ -23,112 +21,113 @@ def load_model():
 
     return model
 
-# Enhanced generation with multiple modes
-def generate_response(message, history, mode='chat'):
-    # Preprocessing based on mode
-    if mode == 'code':
-        system_prompt = "You are an expert coding assistant. Provide clean, efficient code solutions."
-    elif mode == 'creative':
-        system_prompt = "You are a creative writing assistant. Generate imaginative and engaging content."
-    elif mode == 'analytical':
-        system_prompt = "You are an analytical assistant. Provide deep, structured insights and reasoning."
-    else:
-        system_prompt = "You are a helpful AI assistant."
-
-    # Prepare messages with system context
+def generate_instructions(input_text, instruction_type, complexity, audience):
+    # Craft a comprehensive system prompt
+    system_prompt = f"""You are an expert at creating clear, precise instructions.
+    Generate instructions that are:
+    - Type: {instruction_type}
+    - Complexity Level: {complexity}
+    - Target Audience: {audience}
+
+    Core Input Context: {input_text}
+
+    Guidelines:
+    - Use clear, step-by-step language
+    - Ensure instructions are actionable and specific
+    - Include safety warnings or prerequisites if relevant
+    - Adapt complexity to the specified audience level"""
+
+    # Prepare messages for instruction generation
     messages = [
         {"role": "system", "content": system_prompt},
-        *[{"role": "user" if i % 2 == 0 else "assistant", "content": msg}
-          for i, msg in enumerate(sum(history, []))],
-        {"role": "user", "content": message}
+        {"role": "user", "content": f"Please generate comprehensive instructions for: {input_text}"}
     ]
 
     # Generate response
     response = model.create_chat_completion(
         messages=messages,
-        max_tokens=512,
+        max_tokens=1024,
         temperature=0.7,
         top_p=0.95,
     )
 
     return response['choices'][0]['message']['content']
 
-# Extract structured data from text
-def extract_structured_data(text):
-    try:
-        # Try to extract JSON-like structures
-        json_match = re.search(r'\{.*\}', text, re.DOTALL)
-        if json_match:
-            try:
-                return json.loads(json_match.group(0))
-            except json.JSONDecodeError:
-                pass
-
-        # Fall back to custom parsing for key-value pairs
-        data = {}
-        for line in text.split('\n'):
-            if ':' in line:
-                key, value = line.split(':', 1)
-                data[key.strip()] = value.strip()
-
-        return data
-    except Exception as e:
-        return {"error": str(e)}
-
-# Create Gradio interface with multiple tabs
-def create_interface():
-    with gr.Blocks() as demo:
-        gr.Markdown("# Multi-Mode AI Assistant")
-
-        with gr.Tabs():
-            # Chat Interface
-            with gr.TabItem("Conversational Chat"):
-                chat_interface = gr.ChatInterface(
-                    fn=lambda message, history: generate_response(message, history, 'chat'),
-                    title="Conversational AI",
-                    description="General-purpose conversation mode"
-                )
-
-            # Code Generation Tab
-            with gr.TabItem("Code Assistant"):
-                code_interface = gr.ChatInterface(
-                    fn=lambda message, history: generate_response(message, history, 'code'),
-                    title="AI Code Generator",
-                    description="Generate code snippets and solve programming challenges"
-                )
-
-            # Creative Writing Tab
-            with gr.TabItem("Creative Writing"):
-                creative_interface = gr.ChatInterface(
-                    fn=lambda message, history: generate_response(message, history, 'creative'),
-                    title="Creative Writing Assistant",
-                    description="Generate stories, poems, and creative content"
-                )
-
-            # Data Extraction Tab
-            with gr.TabItem("Data Extractor"):
-                with gr.Row():
-                    text_input = gr.Textbox(label="Input Text")
-                    extract_btn = gr.Button("Extract Structured Data")
-                json_output = gr.JSON(label="Extracted Data")
-
-                extract_btn.click(
-                    fn=extract_structured_data,
-                    inputs=text_input,
-                    outputs=json_output
-                )
-
-    return demo
-
-# Load model globally
-print("Starting model loading...")
-model = load_model()
-print("Model loaded successfully!")
-
-# Create and launch the interface
-demo = create_interface()
+# Load model globally
+print("Starting model loading...")
+model = load_model()
+print("Model loaded successfully!")
+
+# Create Gradio interface
+demo = gr.Blocks(title="Instruction Craft AI")
+
+with demo:
+    gr.Markdown("# 📝 Instruction Crafting Assistant")
+    gr.Markdown("Generate precise, tailored instructions for any task or process.")
+
+    with gr.Row():
+        with gr.Column():
+            input_text = gr.Textbox(label="Describe the task or process")
+
+            instruction_type = gr.Dropdown(
+                label="Instruction Type",
+                choices=[
+                    "How-to Guide",
+                    "Technical Manual",
+                    "Safety Procedure",
+                    "Educational Tutorial",
+                    "Cooking Recipe",
+                    "DIY Project",
+                    "Professional Workflow"
+                ]
+            )
+
+            complexity = gr.Dropdown(
+                label="Complexity Level",
+                choices=[
+                    "Beginner",
+                    "Intermediate",
+                    "Advanced",
+                    "Expert"
+                ]
+            )
+
+            audience = gr.Dropdown(
+                label="Target Audience",
+                choices=[
+                    "Children",
+                    "Students",
+                    "General Public",
+                    "Professionals",
+                    "Experts"
+                ]
+            )
+
+            generate_btn = gr.Button("Craft Instructions", variant="primary")
+
+        with gr.Column():
+            output_text = gr.Textbox(label="Generated Instructions", lines=20)
+
+    generate_btn.click(
+        fn=generate_instructions,
+        inputs=[input_text, instruction_type, complexity, audience],
+        outputs=output_text
+    )
+
+    # Add some example inputs
+    demo.load(
+        fn=lambda: {
+            input_text: "Change a car tire",
+            instruction_type: "How-to Guide",
+            complexity: "Intermediate",
+            audience: "General Public"
+        },
+        outputs=[input_text, instruction_type, complexity, audience]
+    )
+
+# Launch the interface
 demo.launch(
-    server_name="0.0.0.0",  # Necessary for Spaces
-    server_port=7860,  # Standard port for Spaces
-    share=True  # Don't need share link in Spaces
+    server_name="0.0.0.0",
+    server_port=7860,
+    share=False
 )
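
For reference, the new generation path can be smoke-tested outside the Gradio UI by calling llama-cpp-python directly. The diff elides the body of load_model, so the exact GGUF filename fetched from forestav/gguf_lora_model is not shown here; the filename and the example prompt below are placeholders, not values taken from the commit.

# Minimal sketch of the instruction-generation call without launching Gradio.
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

# Placeholder filename: the real one is set inside load_model(), which this diff does not show.
model_path = hf_hub_download(repo_id="forestav/gguf_lora_model", filename="model.gguf")
model = Llama(model_path=model_path, n_ctx=2048)

response = model.create_chat_completion(
    messages=[
        {"role": "system", "content": "You are an expert at creating clear, precise instructions."},
        {"role": "user", "content": "Please generate comprehensive instructions for: Change a car tire"},
    ],
    max_tokens=1024,
    temperature=0.7,
    top_p=0.95,
)
print(response['choices'][0]['message']['content'])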