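"""Formatter Pro: a Gradio Space that turns natural-language inventory commands
into structured JSON using a fine-tuned Gemma model (Bhashini)."""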
import os
HF_TOKEN = os.environ["HF_TOKEN"]
# os.environ["BITSANDBYTES_NOWELCOME"] = "1"
import re
import spaces
import gradio as gr
import torch
print(f"Is CUDA available: {torch.cuda.is_available()}")
# True
print(f"CUDA device: {torch.cuda.get_device_name(torch.cuda.current_device())}")
# Tesla T4
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
from huggingface_hub import login, HfFolder
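# `login` is imported but never called below; if the model repo were gated, a
# minimal sketch (assuming HF_TOKEN grants access) would be:
#     login(token=HF_TOKEN)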
# Alternative: load the 16-bit merged model with 4-bit NF4 quantization.
# tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/Bhashini_gemma_merged16bit_clean_final", trust_remote_code=True)
# quantization_config = BitsAndBytesConfig(
#     load_in_4bit=True,
#     bnb_4bit_use_double_quant=True,
#     bnb_4bit_quant_type="nf4",
#     bnb_4bit_compute_dtype=torch.float16,
# )
# model = AutoModelForCausalLM.from_pretrained(
#     "FlawedLLM/Bhashini_gemma_merged16bit_clean_final",
#     device_map="auto",
#     quantization_config=quantization_config,
#     torch_dtype=torch.float16,
#     low_cpu_mem_usage=True,
#     trust_remote_code=True,
# )
# Alternative: load the base model in 8-bit and apply the LoRA adapter with PEFT.
# from peft import PeftModel
# model_name_or_path = "FlawedLLM/Bhashini_gemma_merged4bit_clean_final"  # Hugging Face model ID or local path
# lora_weights = "FlawedLLM/Bhashini_gemma_lora_clean_final"  # LoRA adapter weights
# tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
# model = AutoModelForCausalLM.from_pretrained(model_name_or_path, load_in_8bit=True, device_map="auto")
# model = PeftModel.from_pretrained(model, lora_weights)
# Alternative: load the model directly with a minimal BitsAndBytesConfig.
# bnb_config = BitsAndBytesConfig(
#     load_in_4bit=True,
#     llm_int8_threshold=6.0,
# )
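# Active path: this checkpoint is already merged and quantized to 4 bits, so it
# is loaded directly, without an explicit quantization_config.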
tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/Bhashini_gemma_merged4bit_clean_final")
model = AutoModelForCausalLM.from_pretrained(
    "FlawedLLM/Bhashini_gemma_merged4bit_clean_final",
    device_map="auto",
    # quantization_config=quantization_config,
)
# The alpaca_prompt below must match the template used during fine-tuning.
@spaces.GPU(duration=300)
def chunk_it(input_command, item_list):
    alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.

### Instruction:
{}

### Input:
{}

### Response:
{}"""
    # Constrain ItemName to the supplied list; skip the constraint when the list is empty.
    if item_list:
        item_list = f'''The ItemName should be chosen from the given list: {item_list}, except when adding an item. If ItemName does not find anything SIMILAR in the list, then the ItemName should be "Null".'''
    else:
        item_list = ""
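    # Assemble the Alpaca prompt: the task catalogue plus the optional item-list
    # constraint form the instruction; the user's command is the input.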
    inputs = tokenizer(
        [
            alpaca_prompt.format(
                f'''
You will receive text input that you need to analyze to perform the following tasks:

transaction: Record the details of an item transaction.
last n days transactions: Retrieve transaction records for a specified time period.
view risk inventory: View inventory items based on a risk category.
view inventory: View inventory details.
new items: Add new items to the inventory.
old items: View old items in inventory.
report generation: Generate various inventory reports.

Required Parameters:
Each task requires specific parameters to execute correctly:

transaction:
ItemName (string)
ItemQt (quantity - integer)
Type (string: "sale" or "purchase" or "return")
ShelfNo (string or integer)
ReorderPoint (integer)

last n days transactions:
ItemName (string)
Duration (integer: number of days)

view risk inventory:
RiskType (string: "overstock", "understock", or Null for all risk types)

view inventory:
ItemName (string)
ShelfNo (string or integer)

new items:
ItemName (string)
SellingPrice (number)
CostPrice (number)

old items:
ShelfNo (string or integer)

report generation:
ItemName (string)
Duration (integer: number of days)
ReportType (string: "profit", "revenue", "inventory", or Null for all reports)

{item_list}

ALWAYS provide output in a JSON format.''',  # instruction
                input_command,  # input
                "",  # output - leave this blank for generation!
            )
        ],
        return_tensors="pt",
    ).to("cuda")
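    # Generate up to 216 new tokens with the KV cache enabled; decoding is
    # greedy unless the checkpoint's generation_config enables sampling.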
    outputs = model.generate(**inputs, max_new_tokens=216, use_cache=True)
    reply = tokenizer.batch_decode(outputs)
    # Regular expression pattern to match content between "### Response:" and the "<eos>" token
    pattern = r"### Response:\n(.*?)<eos>"
    # Search for the pattern in the decoded text
    match = re.search(pattern, reply[0], re.DOTALL)  # re.DOTALL allows '.' to match newlines
    if match is None:
        return reply[0]  # fall back to the raw decode if no response section is found
    return match.group(1).strip()  # extract and remove extra whitespace
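# Quick sanity check without the UI (hypothetical example inputs):
# print(chunk_it("sold 5 notebooks from shelf A2", "notebook, pen, stapler"))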
iface = gr.Interface(
fn=chunk_it,
inputs=[
gr.Textbox(label="Input Command", lines=3),
gr.Textbox(label="Item List", lines=5)
],
outputs="text",
title="Formatter Pro",
)
iface.launch(inline=False)