File size: 2,105 Bytes
9ca89bc
2534236
edcd68c
 
9ca89bc
edcd68c
 
 
bf8e0d3
edcd68c
 
 
1f8430f
edcd68c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bf8e0d3
edcd68c
bf8e0d3
edcd68c
 
 
 
 
260a648
edcd68c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
260a648
edcd68c
 
65c80c2
bf8e0d3
edcd68c
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import logging

# Configure logging
# Root logger at INFO so the load/generate lifecycle messages below are visible.
logging.basicConfig(level=logging.INFO)
# Module-level logger named after this module, per stdlib convention.
logger = logging.getLogger(__name__)

# Model and tokenizer setup
def setup_model_and_tokenizer():
    """Load the causal LM and its matching tokenizer from the Hugging Face Hub.

    Also guarantees a usable pad token: GPT-style checkpoints often ship
    without one, so the EOS token is used as a fallback on both the
    tokenizer and the model config.

    Returns:
        tuple: ``(model, tokenizer)`` ready for text generation.
    """
    logger.info("Loading model and tokenizer...")
    checkpoint = "umairrrkhan/english-text-generation"
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    model = AutoModelForCausalLM.from_pretrained(checkpoint)

    # Fall back to EOS wherever a pad token is missing.
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
    if model.config.pad_token_id is None:
        model.config.pad_token_id = tokenizer.pad_token_id

    logger.info("Model and tokenizer loaded successfully.")
    return model, tokenizer

# Load once at import time so every Gradio request reuses the same instances.
model, tokenizer = setup_model_and_tokenizer()

# Define text generation function
def generate_text(prompt):
    """Generate a text continuation for *prompt* with the loaded model.

    Args:
        prompt (str): User-supplied prompt text from the Gradio textbox.

    Returns:
        str: The decoded model output (includes the prompt text), or a
        fixed error message if generation fails.
    """
    # Lazy %-args: the string is only built if the log level is enabled.
    logger.info("Received prompt: %s", prompt)
    inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True)

    try:
        logger.info("Generating text...")
        outputs = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            # max_new_tokens (not max_length) so long prompts still get a
            # continuation: max_length counts the prompt tokens too, and a
            # >=50-token prompt would otherwise produce no new text at all.
            max_new_tokens=50,
            do_sample=True,
            temperature=0.7,
            top_k=50,
            # Explicit pad token silences the "setting pad_token_id" warning.
            pad_token_id=model.config.pad_token_id,
        )
        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
        logger.info("Generated response: %s", response)
        return response
    except Exception as e:
        # logger.exception records the full traceback, not just the message.
        logger.exception("Error during text generation: %s", e)
        return "An error occurred during text generation."

# Create Gradio interface
# Create Gradio interface
iface = gr.Interface(
    fn=generate_text,
    inputs="text",
    outputs="text",
    title="AI Text Generation Chatbot",
    # Fixed grammar in the user-facing description ("Its generate" -> "it generates").
    description="Lowkey curious? Type a prompt and see what it generates!",
    examples=["Tell me a story about a robot.", "Write a poem about the moon."],
)

# Launch the interface
if __name__ == "__main__":
    logger.info("Launching Gradio interface...")
    # Bind to all interfaces on port 7860 (Gradio's default port) so the app
    # is reachable from outside a container; debug=True surfaces errors in
    # the console while developing.
    iface.launch(debug=True, server_name="0.0.0.0", server_port=7860)