harrypy committed on
Commit c86b6ab · 1 Parent(s): 3fa00b5

chore: fix app.py

Files changed (1)
  1. app.py +55 -9
app.py CHANGED
@@ -1,17 +1,63 @@
 import gradio as gr
 from transformers import pipeline
 import spaces
+import gradio as gr
+
+# pipeline = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")
+
+# @spaces.GPU
+# def predict(input_img):
+#     predictions = pipeline(input_img)
+#     return input_img, {p["label"]: p["score"] for p in predictions}
+
+# gradio_app = gr.Interface(
+#     predict,
+#     inputs=gr.Image(label="Select hot dog candidate", sources=['upload', 'webcam'], type="pil"),
+#     outputs=[gr.Image(label="Processed Image"), gr.Label(label="Result", num_top_classes=2)],
+#     title="Hot Dog? Or Not?",
+# ).launch()
 
-pipeline = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")
+from transformers import AutoModelForCausalLM, AutoTokenizer
+import torch
+device = "cuda"  # the device to load the model onto
+
+model = AutoModelForCausalLM.from_pretrained(
+    "vilm/VinaLlama2-14B",
+    torch_dtype='auto',
+    device_map="auto"
+)
+tokenizer = AutoTokenizer.from_pretrained("vilm/VinaLlama2-14B")
 
 @spaces.GPU
-def predict(input_img):
-    predictions = pipeline(input_img)
-    return input_img, {p["label"]: p["score"] for p in predictions}
+def generate_response(input_text):
+    prompt = input_text
+    messages = [
+        {"role": "system", "content": "Bạn là trợ lí AI hữu ích."},
+        {"role": "user", "content": prompt}
+    ]
+    text = tokenizer.apply_chat_template(
+        messages,
+        tokenize=False,
+        add_generation_prompt=True
+    )
+    model_inputs = tokenizer([text], return_tensors="pt").to(device)
+
+    generated_ids = model.generate(
+        model_inputs.input_ids,
+        max_new_tokens=1024,
+        eos_token_id=tokenizer.eos_token_id,
+        temperature=0.25,
+    )
+    generated_ids = [
+        output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
+    ]
+
+    response = tokenizer.batch_decode(generated_ids)[0]
+    return response
 
 gradio_app = gr.Interface(
-    predict,
-    inputs=gr.Image(label="Select hot dog candidate", sources=['upload', 'webcam'], type="pil"),
-    outputs=[gr.Image(label="Processed Image"), gr.Label(label="Result", num_top_classes=2)],
-    title="Hot Dog? Or Not?",
-).launch()
+    generate_response,
+    inputs="text",
+    outputs="text",
+    title="AI Chatbot",
+).launch()
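
Note: this commit swaps the hotdog classifier demo for a chat UI around vilm/VinaLlama2-14B, a Vietnamese chat model; the system prompt "Bạn là trợ lí AI hữu ích." means "You are a helpful AI assistant." Also, temperature=0.25 has no effect as committed, because model.generate defaults to greedy decoding unless do_sample=True is passed.

Once the Space rebuilds, the new text endpoint can be exercised from Python with gradio_client. A minimal sketch, assuming a hypothetical Space id harrypy/ai-chatbot (the actual Space id is not shown in this diff):

from gradio_client import Client

# Connect to the deployed Space; "harrypy/ai-chatbot" is a placeholder id.
client = Client("harrypy/ai-chatbot")

# A single gr.Interface exposes its function under the default "/predict" endpoint.
result = client.predict(
    "Xin chào, bạn là ai?",  # "Hello, who are you?"
    api_name="/predict",
)
print(result)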