import streamlit as st
from transformers import (
    MarianMTModel, MarianTokenizer, 
    GPT2LMHeadModel, GPT2Tokenizer,
    pipeline
)
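# Multi-purpose chatbot demo: English-to-French translation, sentiment
# analysis, and story generation, each backed by a Hugging Face model.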
st.title("Multi Chatbot")
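# Available modes: each entry maps a UI label to a model checkpoint and a
# short description shown in the sidebar.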
models = {
    "English to French": {
        "name": "Helsinki-NLP/opus-mt-en-fr",
        "description": "Translate English text to French."
    },
    "Sentiment Analysis": {
        "name": "distilbert-base-uncased-finetuned-sst-2-english",
        "description": "Analyze the sentiment of input text."
    },
    "Story Generator": {
        "name": "distilgpt2",  
        "description": "Generate creative stories based on input."
    }
}
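# Let the user pick a mode in the sidebar and display its description.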
st.sidebar.header("Choose a Model")
selected_model_key = st.sidebar.radio("Select a Model:", list(models.keys()))
model_name = models[selected_model_key]["name"]
model_description = models[selected_model_key]["description"]
st.sidebar.markdown(f"### Model Description\n{model_description}")
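# Load the selected model. Models are reloaded on every Streamlit rerun;
# wrapping these loaders in st.cache_resource would avoid repeated downloads.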
try:
    if selected_model_key == "English to French":
        st.write("Loading English to French model...")
        tokenizer = MarianTokenizer.from_pretrained(model_name) 
        model = MarianMTModel.from_pretrained(model_name)  
        st.write("English to French model loaded successfully.")
    elif selected_model_key == "Sentiment Analysis":
        st.write("Loading Sentiment Analysis model...")
        sentiment_analyzer = pipeline("sentiment-analysis", model=model_name)
        st.write("Sentiment Analysis model loaded successfully.")
    elif selected_model_key == "Story Generator":
        st.write("Loading Story Generator model...")
        tokenizer = GPT2Tokenizer.from_pretrained(model_name)
        model = GPT2LMHeadModel.from_pretrained(model_name)
        # GPT-2 has no pad token; reuse the EOS token so padding works.
        tokenizer.pad_token = tokenizer.eos_token
        st.write("Story Generator model loaded successfully.")
except Exception as e:
    st.error(f"Failed to load the model: {e}")
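# A single text box is shared by all modes; the selected mode decides how the input is handled.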
user_input = st.text_input("Enter your query:")
if user_input:
    if selected_model_key == "English to French":
        try:
            # Tokenize the English input and generate the French translation.
            inputs = tokenizer(user_input, return_tensors="pt", truncation=True, padding=True)
            outputs = model.generate(**inputs, max_length=150, num_return_sequences=1, no_repeat_ngram_size=2)
            bot_response = tokenizer.decode(outputs[0], skip_special_tokens=True)
            st.write(f"Translated Text: {bot_response}")
        except Exception as e:
            st.error(f"Error during translation: {e}")
    elif selected_model_key == "Sentiment Analysis":
        try:
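            # The pipeline returns a list of {"label", "score"} dicts; show the first result.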
            result = sentiment_analyzer(user_input)[0]
            st.write(f"Sentiment: {result['label']}")
            st.write(f"Confidence: {result['score']:.2f}")
        except Exception as e:
            st.error(f"Error during sentiment analysis: {e}")
    elif selected_model_key == "Story Generator":
        try:
            inputs = tokenizer(user_input, return_tensors="pt", truncation=True, padding=True)
            # Enable sampling so temperature actually affects generation; pass the
            # attention mask so padded positions are ignored.
            outputs = model.generate(**inputs, max_length=500, num_return_sequences=1, no_repeat_ngram_size=2, do_sample=True, temperature=0.7)
            bot_response = tokenizer.decode(outputs[0], skip_special_tokens=True)
            st.write(f"Generated Story: {bot_response}")
        except Exception as e:
            st.error(f"Error during story generation: {e}")