import gradio as gr
from gradio import themes
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the model and tokenizer
model_id = "vikhyatk/moondream2"
revision = "2024-05-20"
model = AutoModelForCausalLM.from_pretrained(
    model_id, trust_remote_code=True, revision=revision
)
tokenizer = AutoTokenizer.from_pretrained(model_id, revision=revision)
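# trust_remote_code loads moondream2's custom modeling code, which provides
# the encode_image() and answer_question() helpers used below.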

def analyze_image_direct(image, question):
    # Encode the uploaded PIL image with moondream2's vision encoder
    enc_image = model.encode_image(image)

    # Ask the question about the encoded image and return the answer text
    answer = model.answer_question(enc_image, question, tokenizer)

    return answer

# Define a custom theme with a purple color scheme using Gradio's theming API
class PurpleTheme(themes.Base):
    def __init__(self):
        super().__init__(primary_hue="purple", font="Arial")
        super().set(
            body_background_fill="#5B2C6F",
            body_text_color="#FFFFFF",
            block_background_fill="#7D3C98",
            button_primary_background_fill="#9b59b6",
        )

# Create Gradio interface with the custom theme
iface = gr.Interface(fn=analyze_image_direct,
                     theme=PurpleTheme(),
                     inputs=[gr.Image(type="pil"), gr.Textbox(lines=2, placeholder="Enter your question here...")],
                     outputs='text',
                     title="Direct Image Question Answering",
                     description="Upload an image and ask a question about it directly using the model.")

# Launch the interface
iface.launch()