import gradio
from transformers import pipeline

# Initialize the Hugging Face text2text-generation pipeline
model = pipeline('text2text-generation', model='google/flan-t5-base')


# Define the chatbot function
def chatbot(input_text):
    # Generate a response from the Hugging Face model
    response = model(input_text, max_length=250, do_sample=True)[0]['generated_text'].strip()

    # Return the bot response
    return response


# Define the Gradio interface
gradio_interface = gradio.Interface(
    fn=chatbot,
    inputs='text',
    outputs='text',
    title='Chatbot',
    description='A weird chatbot conversation experience.',
    examples=[
        ['Hi, how are you?']
    ]
)

# Launch the Gradio interface
gradio_interface.launch()
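
# A minimal sketch of an alternative launch, assuming the standard Gradio
# launch() options: share=True requests a temporary public URL, and
# server_port pins the local port (7860 is Gradio's default).
# gradio_interface.launch(share=True, server_port=7860)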





# from dotenv import load_dotenv
# from langchain import HuggingFaceHub, LLMChain
# from langchain import PromptTemplate
# import gradio
# import os

# load_dotenv()
# # HuggingFaceHub reads the HUGGINGFACEHUB_API_TOKEN environment variable;
# # copy the token over from the HF_API key in .env
# os.environ['HUGGINGFACEHUB_API_TOKEN'] = os.getenv('HF_API')

# hub_llm = HuggingFaceHub(repo_id='facebook/blenderbot-400M-distill')

# prompt = PromptTemplate(
#     input_variables=["question"],
#     template="Answer is: {question}"
# )

# hub_chain = LLMChain(prompt=prompt, llm=hub_llm, verbose=True)




# Sample code for AI language model interaction
# from transformers import GPT2Tokenizer, GPT2LMHeadModel
# import gradio


# def simptok(data):
#     # Load pre-trained model and tokenizer (using the transformers library)
#     # Note: reloading on every call is slow; cache these in production
#     model_name = "gpt2"
#     tokenizer = GPT2Tokenizer.from_pretrained(model_name)
#     model = GPT2LMHeadModel.from_pretrained(model_name)

#     # User input
#     user_input = data

#     # Tokenize input
#     input_ids = tokenizer.encode(user_input, return_tensors="pt")

#     # Generate response
#     output = model.generate(input_ids, max_length=50, num_return_sequences=1)
#     response = tokenizer.decode(output[0], skip_special_tokens=True)
#     return response


# def responsenew(data):
#     return simptok(data)
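
# A minimal sketch of the same idea with the model loaded once at module
# level instead of on every call (the names below are illustrative, not
# part of the original script):
# _tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
# _model = GPT2LMHeadModel.from_pretrained("gpt2")
#
# def simptok_cached(data):
#     input_ids = _tokenizer.encode(data, return_tensors="pt")
#     output = _model.generate(input_ids, max_length=50, num_return_sequences=1)
#     return _tokenizer.decode(output[0], skip_special_tokens=True)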


# from hugchat import hugchat
# import gradio as gr

# # Create a chatbot connection
# chatbot = hugchat.ChatBot(cookie_path="cookies.json")

# # Start a new conversation (ignore any error here)
# conversation_id = chatbot.new_conversation()
# chatbot.change_conversation(conversation_id)


# def get_answer(data):
#     return chatbot.chat(data)

# gradio_interface = gr.Interface(
#   fn = get_answer,
#   inputs = "text",
#   outputs = "text"
# )
# gradio_interface.launch()

# gradio_interface = gradio.Interface(
#   fn = responsenew,
#   inputs = "text",
#   outputs = "text"
# )
# gradio_interface.launch()