Claudz163 committed on
Commit 63eb096 · 1 Parent(s): b0f6ad7
Files changed (1)
  1. app.py +45 -2
app.py CHANGED
@@ -1,4 +1,47 @@
  import streamlit as st
+ from transformers import pipeline
+ from PIL import Image

- x = st.slider('Select a value')
- st.write(x, 'squared is', x * x)
+
+
+
+ st.header("Character Captions")
+ st.write("Have a character caption any image you upload!")
+ character = st.selectbox("Choose a character", ["rapper", "monkey", "shrek", "unintelligible"])
+
+ uploaded_img = st.file_uploader("Upload an image")
+
+ if uploaded_img is not None:
+
+     image = Image.open(uploaded_img)
+     st.image(image)
+
+     image_captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-large")
+
+     response = image_captioner(image)
+     caption = response[0]['generated_text']
+     st.write("Caption:", caption)
+
+     character_prompts = {
+         "rapper": f"Describe this scene like you're a rapper singing: {caption}.",
+         "monkey": f"Describe this scene like you're a monkey going bananas: {caption}.",
+         "shrek": f"Describe this scene like you're Shrek: {caption}.",
+         "unintelligible": f"Describe this scene in a way that makes no sense: {caption}."
+     }
+
+     prompt = character_prompts[character]
+     st.write(prompt)
+
+
+
+
+     # pipe = pipeline("text-generation", model="meta-llama/Meta-Llama-Guard-2-8B")
+
+     # # Pass the caption to another model
+
+     # personality = "rapper"
+     # prompt = personality_prompts[personality]
+
+     # styled_caption = text_generator(prompt, max_length=100, num_return_sequences=1)
+
+     # print(styled_caption[0]["generated_text"])
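Note: the trailing commented-out lines sketch an intended next step, passing the character prompt to a text-generation pipeline so a language model produces the styled caption rather than the app displaying the raw prompt. Below is a minimal sketch of that step under assumptions not made by this commit: the small, ungated "gpt2" model and the style_caption helper are placeholders chosen for illustration (the commented code itself points at meta-llama/Meta-Llama-Guard-2-8B, a gated model).

from transformers import pipeline

# Placeholder model: any causal language model could be substituted here.
text_generator = pipeline("text-generation", model="gpt2")

def style_caption(prompt: str) -> str:
    # Generate one continuation of the character prompt and return its text.
    outputs = text_generator(prompt, max_length=100, num_return_sequences=1)
    return outputs[0]["generated_text"]

# In the upload branch of app.py this would replace st.write(prompt):
#     st.write("Styled caption:", style_caption(prompt))

Separately, building the BLIP captioning pipeline inside the upload branch reloads the model on every Streamlit rerun; wrapping that pipeline() call in a function decorated with st.cache_resource would load it once and reuse it.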