import streamlit as st
from unsloth import FastLanguageModel
import torch

# Load the model and tokenizer
max_seq_length = 2048
dtype = None  # None for auto detection; float16 for Tesla T4/V100, bfloat16 for Ampere+
load_in_4bit = True  # Use 4-bit quantization to reduce memory usage. Can be False.
# Initialize the model | |
model, tokenizer = FastLanguageModel.from_pretrained( | |
model_name="suhaifLLM/unsloth-llama3-8b-instruct-4bit", | |
max_seq_length=max_seq_length, | |
dtype=dtype, | |
load_in_4bit=load_in_4bit | |
) | |
# Default instruction
default_instruction = (
    "You are a creative writer. Based on the given input, generate a well-structured story "
    "with an engaging plot, well-developed characters, and immersive details. Ensure the story "
    "has a clear beginning, middle, and end. Include dialogue and descriptions to bring the "
    "story to life. You can also add a twist to the story."
)

def format_prompt(input_text, instruction=default_instruction):
    return f"{instruction}\n\nInput:\n{input_text}\n\nResponse:\n"
# Streamlit app
st.title("Interactive Storytelling Assistant")
st.write("Create your story prompt and receive story suggestions!")

# User input for the story prompt
user_input = st.text_area("Enter your story idea:", "A young adventurer embarks on a journey to find a lost treasure.")
generate_story = st.button("Generate Story")
if generate_story and user_input:
    # Tokenize the formatted prompt and move it to the GPU
    inputs = tokenizer(
        [format_prompt(user_input)],
        return_tensors="pt",
    ).to("cuda")

    # Generate the story
    outputs = model.generate(**inputs, max_new_tokens=500, use_cache=True)

    # Decode only the newly generated tokens so the instruction and input
    # are not echoed back in the displayed story
    generated_story = tokenizer.decode(
        outputs[0][inputs["input_ids"].shape[1]:],
        skip_special_tokens=True,
    )

    # Display the generated story
    st.subheader("Generated Story:")
    st.write(generated_story)
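    # Sketch (assumption, not in the original app): stash the story in
    # st.session_state so it survives the rerun Streamlit triggers when the
    # feedback widgets below are used.
    st.session_state["generated_story"] = generated_story
elif "generated_story" in st.session_state:
    # Re-display the last generated story on reruns where the button was not pressed.
    st.subheader("Generated Story:")
    st.write(st.session_state["generated_story"])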
# Feedback mechanism
st.subheader("Rate the Story")
story_rating = st.slider("How would you rate this story?", 1, 5)
user_feedback = st.text_area("Additional Feedback/Suggestions:")

if st.button("Submit Feedback"):
    st.write("Thank you for your feedback!")
    # Process feedback (in a real scenario, this would be saved to a database)
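    # Minimal sketch (assumption): persist feedback to a hypothetical local JSONL
    # file "feedback.jsonl"; a real deployment would write to a database instead.
    import json
    with open("feedback.jsonl", "a", encoding="utf-8") as f:
        f.write(json.dumps({"rating": story_rating, "feedback": user_feedback}) + "\n")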
# Community engagement
st.subheader("Share Your Story")
user_story = st.text_area("Write or paste your own story here:")

if st.button("Share Story"):
    st.write("Thank you for sharing your story!")
    # Save the story (in a real scenario, this would be saved to a shared community platform)
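    # Minimal sketch (assumption): append the shared story to a hypothetical local
    # JSONL file "community_stories.jsonl"; a real app would push it to a shared platform.
    import json
    with open("community_stories.jsonl", "a", encoding="utf-8") as f:
        f.write(json.dumps({"story": user_story}) + "\n")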
# Display shared stories (placeholder example)
st.subheader("Community Stories")
st.write("Story 1: An epic tale of adventure...")  # Placeholder for actual stories
# Critique section
st.text_area("Leave a critique for Story 1:")
if st.button("Submit Critique"):
    st.write("Thank you for your critique!")