File size: 3,730 Bytes
4ade08c d5c72cc b03e00d d5c72cc 16a33a8 d5c72cc c5c8f7b ec230fe c5c8f7b ab546a4 c5c8f7b ab546a4 ec230fe ab546a4 d5c72cc ab546a4 c5c8f7b d5c72cc c5c8f7b b03e00d c5c8f7b ab546a4 c5c8f7b ab546a4 c5c8f7b ab546a4 c5c8f7b b03e00d c5c8f7b ab546a4 22dc869 ab546a4 c5c8f7b ab546a4 c5c8f7b ab546a4 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 |
import gradio as gr
from llama_cpp import Llama
from huggingface_hub import hf_hub_download
def load_model():
    """Download the GGUF model from the Hugging Face Hub and load it with llama.cpp.

    Returns:
        A ready-to-use ``Llama`` instance backed by the downloaded weights.
    """
    repo_id = "forestav/gguf_lora_model"
    model_file = "unsloth.F16.gguf"

    # hf_hub_download caches the file locally and returns its path,
    # so repeated launches do not re-download the weights.
    local_path = hf_hub_download(repo_id=repo_id, filename=model_file)
    print(f"Loading model from: {local_path}")

    # 2048-token context window, 8 CPU threads for inference.
    return Llama(model_path=local_path, n_ctx=2048, n_threads=8)
def generate_instructions(input_text, instruction_type, complexity, audience):
    """Generate tailored instructions via the globally loaded chat model.

    Args:
        input_text: Description of the task or process to write instructions for.
        instruction_type: Desired format (e.g. "How-to Guide").
        complexity: Complexity level (e.g. "Beginner", "Expert").
        audience: Intended reader group (e.g. "General Public").

    Returns:
        The generated instruction text from the model's first completion choice.
    """
    # System prompt steering format, difficulty, and audience of the output.
    system_prompt = f"""You are an expert at creating clear, precise instructions.
Generate instructions that are:
- Type: {instruction_type}
- Complexity Level: {complexity}
- Target Audience: {audience}
Core Input Context: {input_text}
Guidelines:
- Use clear, step-by-step language
- Ensure instructions are actionable and specific
- Include safety warnings or prerequisites if relevant
- Adapt complexity to the specified audience level"""

    user_prompt = f"Please generate comprehensive instructions for: {input_text}"

    # Uses the module-level `model` loaded at startup.
    completion = model.create_chat_completion(
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ],
        max_tokens=1024,
        temperature=0.7,
        top_p=0.95,
    )

    first_choice = completion['choices'][0]
    return first_choice['message']['content']
# Load the model once at startup so every request reuses the same instance.
print("Starting model loading...")
model = load_model()
print("Model loaded successfully!")

# Build the Gradio UI. Default values are passed to the component
# constructors: assigning `component.value` after creation (as the
# original code did) does not update the rendered defaults in
# Gradio Blocks — `value` is read at construction time.
demo = gr.Blocks(title="Instruction Craft AI")
with demo:
    gr.Markdown("# 📝 Instruction Crafting Assistant")
    gr.Markdown("Generate precise, tailored instructions for any task or process.")

    with gr.Row():
        with gr.Column():
            input_text = gr.Textbox(
                label="Describe the task or process",
                value="Change a car tire",
            )
            instruction_type = gr.Dropdown(
                label="Instruction Type",
                choices=[
                    "How-to Guide",
                    "Technical Manual",
                    "Safety Procedure",
                    "Educational Tutorial",
                    "Cooking Recipe",
                    "DIY Project",
                    "Professional Workflow",
                ],
                value="How-to Guide",
            )
            complexity = gr.Dropdown(
                label="Complexity Level",
                choices=["Beginner", "Intermediate", "Advanced", "Expert"],
                value="Intermediate",
            )
            audience = gr.Dropdown(
                label="Target Audience",
                choices=[
                    "Children",
                    "Students",
                    "General Public",
                    "Professionals",
                    "Experts",
                ],
                value="General Public",
            )
            generate_btn = gr.Button("Craft Instructions", variant="primary")
        with gr.Column():
            output_text = gr.Textbox(label="Generated Instructions", lines=20)

    # Wire the button to the generator: four inputs in, one textbox out.
    generate_btn.click(
        fn=generate_instructions,
        inputs=[input_text, instruction_type, complexity, audience],
        outputs=output_text,
    )

# Launch the interface, listening on all interfaces at the standard Gradio port.
demo.launch(
    server_name="0.0.0.0",
    server_port=7860,
    share=False,
)