Akshat1000 committed on
Commit
1a2d379
1 Parent(s): 10e9eb7

Create Chatbot.py

Files changed (1)
  1. Chatbot.py +32 -0
Chatbot.py ADDED
@@ -0,0 +1,32 @@
+ # Import necessary libraries
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+ import torch
+
+ # Load the tokenizer and model
+ tokenizer = AutoTokenizer.from_pretrained("chuanli11/Llama-3.2-3B-Instruct-uncensored")
+ model = AutoModelForCausalLM.from_pretrained("chuanli11/Llama-3.2-3B-Instruct-uncensored")
+
+ def generate_response(input_text):
+     # Encode the input text and generate a response
+     inputs = tokenizer(input_text, return_tensors="pt")
+     outputs = model.generate(**inputs, max_length=150, do_sample=True, temperature=0.7, top_p=0.9)
+
+     # Decode the output to get the chatbot response
+     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+     return response
+
+ def chat():
+     print("Chatbot: Hello! I'm here to assist you. Type 'exit' to end the conversation.")
+     while True:
+         user_input = input("You: ")
+         if user_input.lower() == "exit":
+             print("Chatbot: Goodbye!")
+             break
+
+         # Generate the response from the chatbot
+         response = generate_response(user_input)
+         print(f"Chatbot: {response}")
+
+ # Start the chat
+ if __name__ == "__main__":
+     chat()
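
For readers trying the file out: a minimal usage sketch, assuming the model weights download successfully. The GPU device placement and the switch from max_length to max_new_tokens are assumptions for illustration, not part of this commit.

    import torch
    from transformers import AutoTokenizer, AutoModelForCausalLM

    # Assumption: pick a GPU if one is available; the committed script runs on CPU by default
    device = "cuda" if torch.cuda.is_available() else "cpu"
    tokenizer = AutoTokenizer.from_pretrained("chuanli11/Llama-3.2-3B-Instruct-uncensored")
    model = AutoModelForCausalLM.from_pretrained("chuanli11/Llama-3.2-3B-Instruct-uncensored").to(device)

    # Assumption: max_new_tokens caps only the generated continuation,
    # whereas max_length in the committed script counts the prompt tokens too
    inputs = tokenizer("Hello, who are you?", return_tensors="pt").to(device)
    outputs = model.generate(**inputs, max_new_tokens=150, do_sample=True, temperature=0.7, top_p=0.9)
    print(tokenizer.decode(outputs[0], skip_special_tokens=True))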