import gradio
# from transformers import pipeline
from transformers import AutoTokenizer, AutoModelForCausalLM
import os
# Read the Hugging Face access token from the environment (set HF_TOKEN before running)
hf_token = os.getenv("HF_TOKEN")
# Initialize the Hugging Face model
# model = pipeline(model='google/flan-t5-base')
tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b", token=hf_token)
model = AutoModelForCausalLM.from_pretrained("google/gemma-7b", token=hf_token)
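# Note: google/gemma-7b is a gated checkpoint, so the token above must belong to an
# account that has accepted the model license; the full-precision weights also need
# tens of GB of memory, so a smaller model may be a safer default on modest hardware.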
# Define the chatbot function
def chatbot(input_text):
    prompt = (
        "Answer the given input in the context of the Bhagavad Gita. "
        "Give the user suggestions based upon the meanings of the shloks "
        f"in the Bhagavad Gita. input = {input_text}"
    )
    # Generate a response from the Hugging Face model
    # response = model(prompt, max_length=250, do_sample=True)[0]['generated_text'].strip()
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(**inputs, max_new_tokens=250)
    # Decode only the newly generated tokens (causal LMs echo the prompt) and
    # return the bot response as text rather than a raw tensor
    return tokenizer.decode(
        outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True
    )
# Define the Gradio interface
gradio_interface = gradio.Interface(
    fn=chatbot,
    inputs='text',
    outputs='text',
    title='Chatbot',
    description='A weird chatbot conversation experience.',
    examples=[
        ['Hi, how are you?']
    ]
)
# Launch the Gradio interface
gradio_interface.launch()
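# launch() serves the app locally (by default at http://127.0.0.1:7860); pass
# share=True to get a temporary public URL if the app must be reachable remotely.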
# from dotenv import load_dotenv
# from langchain import HuggingFaceHub, LLMChain
# from langchain import PromptTemplate
# import gradio
# load_dotenv()
# os.environ["HUGGINGFACEHUB_API_TOKEN"] = os.getenv("HF_API")
# hub_llm = HuggingFaceHub(repo_id='facebook/blenderbot-400M-distill')
# prompt = PromptTemplate(
#     input_variables=["question"],
#     template="Answer is: {question}"
# )
# hub_chain = LLMChain(prompt=prompt, llm=hub_llm, verbose=True)
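# Example usage if the block above is uncommented (LLMChain exposes a run() helper
# that fills the prompt template and calls the hub model; the question string here
# is purely illustrative):
# answer = hub_chain.run("What is machine learning?")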
# Sample code for AI language model interaction
# from transformers import GPT2Tokenizer, GPT2LMHeadModel
# import gradio
# def simptok(data):
#     # Load pre-trained model and tokenizer (using the transformers library)
#     model_name = "gpt2"
#     tokenizer = GPT2Tokenizer.from_pretrained(model_name)
#     model = GPT2LMHeadModel.from_pretrained(model_name)
#     # User input
#     user_input = data
#     # Tokenize input
#     input_ids = tokenizer.encode(user_input, return_tensors="pt")
#     # Generate response
#     output = model.generate(input_ids, max_length=50, num_return_sequences=1)
#     response = tokenizer.decode(output[0], skip_special_tokens=True)
#     return response
# def responsenew(data):
#     return simptok(data)
# from hugchat import hugchat
# import gradio as gr
# import time
# # Create a chatbot connection
# chatbot = hugchat.ChatBot(cookie_path="cookies.json")
# # Start a new conversation (errors here can be ignored)
# id = chatbot.new_conversation()
# chatbot.change_conversation(id)
# def get_answer(data):
#     return chatbot.chat(data)
# gradio_interface = gr.Interface(
#     fn=get_answer,
#     inputs="text",
#     outputs="text"
# )
# gradio_interface.launch()
# gradio_interface = gradio.Interface(
#     fn=responsenew,
#     inputs="text",
#     outputs="text"
# )
# gradio_interface.launch()