maverick-x committed
Commit 20834fb · verified · 1 Parent(s): 665187a

Create app.py

Files changed (1):
  1. app.py +70 -0
app.py ADDED
@@ -0,0 +1,70 @@
+ import streamlit as st
+ from unsloth import FastLanguageModel
+ import torch
+
+ # Model configuration
+ max_seq_length = 2048
+ dtype = None  # None for auto detection; float16 for Tesla T4/V100, bfloat16 for Ampere+
+ load_in_4bit = True  # Use 4-bit quantization to reduce memory usage. Can be False.
+
+ # Load the model and tokenizer once, cached across Streamlit reruns so the
+ # 8B model is not reloaded on every widget interaction
+ @st.cache_resource
+ def load_model():
+     model, tokenizer = FastLanguageModel.from_pretrained(
+         model_name="suhaifLLM/unsloth-llama3-8b-instruct-4bit",
+         max_seq_length=max_seq_length,
+         dtype=dtype,
+         load_in_4bit=load_in_4bit,
+     )
+     FastLanguageModel.for_inference(model)  # enable Unsloth's faster inference mode
+     return model, tokenizer
+
+ model, tokenizer = load_model()
+
+ # Default instruction
+ default_instruction = "You are a creative writer. Based on the given input, generate a well-structured story with an engaging plot, well-developed characters, and immersive details. Ensure the story has a clear beginning, middle, and end. Include dialogue and descriptions to bring the story to life. You can also add a twist to the story."
+
+ def format_prompt(input_text, instruction=default_instruction):
+     return f"{instruction}\n\nInput:\n{input_text}\n\nResponse:\n"
+
+ # Streamlit app
+ st.title("Interactive Storytelling Assistant")
+ st.write("Create your story prompt and receive story suggestions!")
+
+ # User input for story prompt
+ user_input = st.text_area("Enter your story idea:", "A young adventurer embarks on a journey to find a lost treasure.")
+ generate_story = st.button("Generate Story")
+
+ if generate_story and user_input:
+     # Prepare inputs for the model
+     inputs = tokenizer(
+         [format_prompt(user_input)],
+         return_tensors="pt"
+     ).to("cuda")
+
+     # Generate the story
+     outputs = model.generate(**inputs, max_new_tokens=500, use_cache=True)
+     # Decode only the newly generated tokens so the prompt is not echoed back,
+     # and keep the story in session state so it survives the reruns triggered
+     # by the feedback buttons below
+     st.session_state["generated_story"] = tokenizer.decode(
+         outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True
+     )
+
+ if "generated_story" in st.session_state:
+     # Display the generated story
+     st.subheader("Generated Story:")
+     st.write(st.session_state["generated_story"])
+
+     # Feedback mechanism
+     st.subheader("Rate the Story")
+     story_rating = st.slider("How would you rate this story?", 1, 5)
+     user_feedback = st.text_area("Additional Feedback/Suggestions:")
+
+     if st.button("Submit Feedback"):
+         st.write("Thank you for your feedback!")
+         # Process feedback (in a real scenario, this would be saved to a database)
+
+ # Community engagement
+ st.subheader("Share Your Story")
+ user_story = st.text_area("Write or paste your own story here:")
+ if st.button("Share Story"):
+     st.write("Thank you for sharing your story!")
+     # Save the story (in a real scenario, this would go to a shared community platform)
+
+ # Display shared stories (placeholder example)
+ st.subheader("Community Stories")
+ st.write("Story 1: An epic tale of adventure...")  # Placeholder for actual stories
+
+ # Critique section
+ st.text_area("Leave a critique for Story 1:")
+ if st.button("Submit Critique"):
+     st.write("Thank you for your critique!")