suhaifLLM committed on
Commit
73c22e3
·
verified ·
1 Parent(s): ecdceea

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +54 -45
app.py CHANGED
@@ -1,61 +1,70 @@
1
- # file: interactive_storytelling.py
2
  import streamlit as st
3
  from unsloth import FastLanguageModel
4
- from transformers import TextStreamer
5
- # import torch
6
 
7
- # Initialize the model and tokenizer
8
  max_seq_length = 2048
9
- dtype = None # Auto-detection
10
- load_in_4bit = True
11
 
12
- # Load the pre-trained model
13
  model, tokenizer = FastLanguageModel.from_pretrained(
14
  model_name="suhaif/unsloth-llama-3-8b-4bit",
15
  max_seq_length=max_seq_length,
16
  dtype=dtype,
17
- load_in_4bit=load_in_4bit,
18
  )
19
 
20
- FastLanguageModel.for_inference(model)
 
21
 
22
- # Set up the Streamlit app
 
 
 
23
  st.title("Interactive Storytelling Assistant")
24
- st.write("Collaboratively write stories and get real-time suggestions!")
25
-
26
- # Text input for the user prompt
27
- user_input = st.text_area("Write the beginning of your story:", placeholder="Once upon a time...")
28
-
29
- if st.button("Generate Suggestions"):
30
- if user_input:
31
- # Tokenize and send the input to the model
32
- inputs = tokenizer([user_input], return_tensors="pt")
33
-
34
- # Stream the generated output
35
- text_streamer = TextStreamer(tokenizer)
36
- st.write("Generating suggestions...")
37
- generated_text = model.generate(**inputs, streamer=text_streamer, max_new_tokens=250)
38
-
39
- # Display the generated text
40
- st.subheader("Story Suggestions:")
41
- st.write(generated_text)
42
-
43
- # Feedback section for rating
44
- rating = st.slider("Rate the suggestion:", 1, 5)
45
- if rating:
46
- st.success(f"Thank you for your feedback! You rated this {rating} stars.")
47
-
48
- # Community sharing section
49
- st.subheader("Community Stories")
50
- st.write("Upload your story and share it with the community!")
51
 
52
- uploaded_story = st.file_uploader("Upload your story (txt file)", type="txt")
 
 
 
53
 
54
- if uploaded_story:
55
- # Display uploaded story
56
- story_text = uploaded_story.read().decode("utf-8")
57
- st.text_area("Your Uploaded Story:", story_text, height=300)
 
 
 
 
 
 
 
 
 
 
58
 
59
- # Display other community stories (could be expanded with database integration)
60
- st.subheader("Community Stories Feedback Section")
61
- st.write("In this section, you can provide critiques for other community stories.")
 
 
 
1
  import streamlit as st
2
  from unsloth import FastLanguageModel
3
+ import torch
 
4
 
5
+ # Load the model and tokenizer
6
  max_seq_length = 2048
7
+ dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
8
+ load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False.
9
 
10
+ # Initialize the model
11
  model, tokenizer = FastLanguageModel.from_pretrained(
12
  model_name="suhaif/unsloth-llama-3-8b-4bit",
13
  max_seq_length=max_seq_length,
14
  dtype=dtype,
15
+ load_in_4bit=load_in_4bit
16
  )
17
 
18
+ # Default instruction
19
+ default_instruction = "You are a creative writer. Based on the given input, generate a well-structured story with an engaging plot, well-developed characters, and immersive details. Ensure the story has a clear beginning, middle, and end. Include dialogue and descriptions to bring the story to life. You can add a twist to the story also."
20
 
21
+ def format_prompt(input_text, instruction=default_instruction):
22
+ return f"{instruction}\n\nInput:\n{input_text}\n\nResponse:\n"
23
+
24
# --- Streamlit UI: story generation ------------------------------------------
st.title("Interactive Storytelling Assistant")
st.write("Create your story prompt and receive story suggestions!")

# User input for story prompt
user_input = st.text_area("Enter your story idea:", "A young adventurer embarks on a journey to find a lost treasure.")
generate_story = st.button("Generate Story")

if generate_story and user_input:
    # Fall back to CPU when no GPU is present; the previous revision's
    # hard-coded .to("cuda") crashed on CPU-only hosts.
    device = "cuda" if torch.cuda.is_available() else "cpu"

    # Prepare inputs for the model
    inputs = tokenizer(
        [format_prompt(user_input)],
        return_tensors="pt",
    ).to(device)

    # Generate story
    outputs = model.generate(**inputs, max_new_tokens=500, use_cache=True)

    # Decode only the newly generated tokens: decoding outputs[0] wholesale
    # would echo the entire instruction + prompt back at the user before
    # the story itself.
    prompt_length = inputs["input_ids"].shape[1]
    generated_story = tokenizer.decode(
        outputs[0][prompt_length:], skip_special_tokens=True
    )

    # Display generated story
    st.subheader("Generated Story:")
    st.write(generated_story)
 
 
 
 
 
 
 
46
 
47
# --- Feedback -----------------------------------------------------------------
st.subheader("Rate the Story")
story_rating = st.slider("How would you rate this story?", 1, 5)
user_feedback = st.text_area("Additional Feedback/Suggestions:")

if st.button("Submit Feedback"):
    # Placeholder acknowledgement; a real deployment would persist the
    # rating and feedback text to a database.
    st.write("Thank you for your feedback!")

# --- Community engagement -----------------------------------------------------
st.subheader("Share Your Story")
user_story = st.text_area("Write or paste your own story here:")
if st.button("Share Story"):
    # Placeholder acknowledgement; a real deployment would publish the
    # story to a shared community platform.
    st.write("Thank you for sharing your story!")

# Static placeholder listing until real story storage exists.
st.subheader("Community Stories")
st.write("Story 1: An epic tale of adventure...")  # Placeholder for actual stories

# Critique form for the placeholder story above.
st.text_area("Leave a critique for Story 1:")
if st.button("Submit Critique"):
    st.write("Thank you for your critique!")