# -*- coding: utf-8 -*-
"""
Created on Sat Oct 5 16:41:22 2024
@author: Admin
"""
import gradio as gr
from transformers import pipeline

# Only needed for the commented-out Llama / direct-generate paths below:
import os
import torch
from huggingface_hub import login
from transformers import AutoModelForCausalLM, AutoTokenizer
# Phi-3.5-mini-instruct is a chat-tuned model; the task ("text-generation") is
# inferred from the model config, but naming it explicitly makes the intent clear.
chatbot = pipeline("text-generation", model="microsoft/Phi-3.5-mini-instruct")
# Alternative backends, left commented out. The meta-llama repos are gated, so
# they require accepting the license on the Hub and logging in with an HF_TOKEN.
#login(token=os.getenv("HF_TOKEN"))
#chatbot = pipeline("text-generation", model="meta-llama/Llama-3.2-1B")
#tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-1B-Instruct")
#model = AutoModelForCausalLM.from_pretrained(
#    "meta-llama/Llama-3.2-1B-Instruct",
#    device_map="auto",
#    torch_dtype="auto",
#)
#chatbot = pipeline(model="facebook/blenderbot-400M-distill")
def vanilla_chatbot(message, history):
    """Stateless turn handler for gr.ChatInterface; `history` is unused here."""
    # Direct generate() path for the commented-out Llama model above:
    #inputs = tokenizer(message, return_tensors="pt").to("cpu")
    #with torch.no_grad():
    #    outputs = model.generate(inputs.input_ids, max_length=100)
    #return tokenizer.decode(outputs[0], skip_special_tokens=True)

    # Send the prompt in chat-message format so the pipeline applies the
    # model's chat template instead of treating it as a raw completion prompt.
    conversation = chatbot([{"role": "user", "content": message}], max_new_tokens=256)
    # With chat input the pipeline returns the whole conversation;
    # the final message is the assistant's reply.
    return conversation[0]["generated_text"][-1]["content"]
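# Quick smoke test of the handler outside the UI (hypothetical prompt; the empty
# list stands in for the Gradio history, which vanilla_chatbot ignores):
#print(vanilla_chatbot("What is the capital of France?", []))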
demo_chatbot = gr.ChatInterface(
    vanilla_chatbot,
    title="Vanilla Chatbot",
    description="Enter text to start chatting.",
)
# Pass share by keyword: launch()'s first positional parameter is inline, not share.
demo_chatbot.launch(share=True)
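# A possible extension (sketch, not wired in above): replay the Gradio history
# into the chat-message list so the model sees earlier turns. Assumes the
# default tuple-style ChatInterface history of (user_message, bot_reply) pairs.
#def contextual_chatbot(message, history):
#    messages = []
#    for user_turn, bot_turn in history:
#        messages.append({"role": "user", "content": user_turn})
#        messages.append({"role": "assistant", "content": bot_turn})
#    messages.append({"role": "user", "content": message})
#    conversation = chatbot(messages, max_new_tokens=256)
#    return conversation[0]["generated_text"][-1]["content"]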