dpaul93 committed · verified
Commit 3a7fb07 · 1 Parent(s): 19b5891

Update app.py

Files changed (1)
app.py +70 -3
app.py CHANGED
@@ -1,7 +1,74 @@
 import gradio as gr
+import pandas as pd
+import json
+import os
+from pprint import pprint
+import bitsandbytes as bnb
+import torch
+import torch.nn as nn
+import transformers
+from datasets import load_dataset, Dataset
+from huggingface_hub import notebook_login
 
-def greet(name):
-    return "Hello " + name + "!!"
+from peft import LoraConfig, PeftConfig, PeftModel, get_peft_model, prepare_model_for_kbit_training
+from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
 
-iface = gr.Interface(fn=greet, inputs="text", outputs="text")
+
+# 4-bit NF4 quantization so the 7B base model fits in limited GPU memory
+bnb_config = BitsAndBytesConfig(
+    load_in_4bit=True,
+    bnb_4bit_use_double_quant=True,
+    bnb_4bit_quant_type="nf4",
+    bnb_4bit_compute_dtype=torch.bfloat16,
+)
+
+PEFT_MODEL = "dpaul93/falcon-7b-qlora-chat-claim-data"  # "/content/trained-model"
+
+config = PeftConfig.from_pretrained(PEFT_MODEL)
+config.base_model_name_or_path = "tiiuae/falcon-7b"
+
+# Load the quantized base model, then attach the QLoRA adapter on top of it
+model = AutoModelForCausalLM.from_pretrained(
+    config.base_model_name_or_path,
+    return_dict=True,
+    quantization_config=bnb_config,
+    device_map="auto",
+    trust_remote_code=True,
+)
+
+tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
+tokenizer.pad_token = tokenizer.eos_token
+
+model = PeftModel.from_pretrained(model, PEFT_MODEL)
+
+
+def generate_test_prompt(text):
+    return f"""Given the following claim:
+{text}.
+pick one of the following options
+(a) true
+(b) false
+(c) mixture
+(d) unknown
+(e) not_applicable?""".strip()
+
+
+def generate_and_tokenize_prompt(text):
+    prompt = generate_test_prompt(text)
+    device = "cuda"
+    encoding = tokenizer(prompt, return_tensors="pt").to(device)
+    with torch.inference_mode():
+        outputs = model.generate(
+            input_ids=encoding.input_ids,
+            attention_mask=encoding.attention_mask,
+            # assumed generation defaults: a short, single-option answer
+            max_new_tokens=20,
+            pad_token_id=tokenizer.eos_token_id,
+        )
+    # Keep only the option the fine-tuned model emits after the "Answer:" marker
+    decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    return decoded.split("Answer:")[-1].split("\n")[0].split(".")[0]
+
+
+def classifyUsingLLAMA(text):
+    return generate_and_tokenize_prompt(text)
+
+
+iface = gr.Interface(fn=classifyUsingLLAMA, inputs="text", outputs="text")
 iface.launch()
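
Once this revision of app.py is running in a Space, the classifier can also be called programmatically. Below is a minimal sketch, assuming the default /predict endpoint that gr.Interface exposes; the Space id is a placeholder and is not taken from this commit:

from gradio_client import Client

# Hypothetical Space id; replace with the Space that actually serves this app.py
client = Client("dpaul93/claim-checker")
label = client.predict("The Great Wall of China is visible from the Moon.", api_name="/predict")
print(label)  # expected: one of true / false / mixture / unknown / not_applicable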