Shanat committed
Commit
c4b8230
1 Parent(s): 0c47534

Upload 2 files

Files changed (2)
  1. app.py +45 -0
  2. requirements.txt +5 -0
app.py ADDED
@@ -0,0 +1,45 @@
+ # -*- coding: utf-8 -*-
+ """
+ Created on Sat Oct 5 16:41:22 2024
+
+ @author: Admin
+ """
+
+ import os
+
+ import gradio as gr
+ import torch
+ from huggingface_hub import login
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ # Authenticate with the Hugging Face Hub using the HF_TOKEN secret
+ # (needed to download the gated Llama model).
+ login(token=os.getenv("HF_TOKEN"))
+
+ # Earlier pipeline-based experiments:
+ # chatbot = pipeline(model="NCSOFT/Llama-3-OffsetBias-8B")
+ # chatbot = pipeline(model="meta-llama/Llama-3.2-1B")
+ # chatbot = pipeline(model="facebook/blenderbot-400M-distill")
+
+ tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-1B-Instruct")
+ model = AutoModelForCausalLM.from_pretrained(
+     "meta-llama/Llama-3.2-1B-Instruct",
+     device_map="auto",
+     torch_dtype="auto",
+ )
+
+ message_list = []
+ response_list = []
+
+
+ def vanilla_chatbot(message, history):
+     # Tokenize the user message and move it to the same device as the model
+     # (device_map="auto" may have placed the model on a GPU).
+     inputs = tokenizer(message, return_tensors="pt").to(model.device)
+     with torch.no_grad():
+         outputs = model.generate(
+             **inputs,
+             max_new_tokens=100,
+             pad_token_id=tokenizer.eos_token_id,
+         )
+     # Decode only the newly generated tokens, skipping the echoed prompt.
+     return tokenizer.decode(
+         outputs[0][inputs.input_ids.shape[-1]:], skip_special_tokens=True
+     )
+
+
+ demo_chatbot = gr.ChatInterface(
+     vanilla_chatbot,
+     title="Vanilla Chatbot",
+     description="Enter text to start chatting.",
+ )
+
+ demo_chatbot.launch()
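
Since meta-llama/Llama-3.2-1B-Instruct is a chat-tuned model, one possible refinement of vanilla_chatbot is to format the conversation with the tokenizer's chat template and feed Gradio's history argument back into the prompt. The sketch below is not part of this commit: it assumes the tokenizer and model objects defined in app.py above and Gradio's default tuple-style history, and the helper name chat_respond is illustrative only.

    # Hypothetical handler (not in the commit): chat-template-based generation.
    def chat_respond(message, history):
        # Rebuild the conversation from Gradio's (user, assistant) tuples.
        messages = []
        for user_msg, bot_msg in history:
            messages.append({"role": "user", "content": user_msg})
            messages.append({"role": "assistant", "content": bot_msg})
        messages.append({"role": "user", "content": message})

        # Format with the model's chat template and generate a reply.
        input_ids = tokenizer.apply_chat_template(
            messages, add_generation_prompt=True, return_tensors="pt"
        ).to(model.device)
        with torch.no_grad():
            outputs = model.generate(input_ids, max_new_tokens=100)
        # Return only the newly generated portion of the output.
        return tokenizer.decode(
            outputs[0][input_ids.shape[-1]:], skip_special_tokens=True
        )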
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ gradio
+ transformers
+ torch
+ huggingface_hub
+ accelerate>=0.26.0