import re

import gradio as gr
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

model = AutoPeftModelForCausalLM.from_pretrained(
    "FlawedLLM/BhashiniLLM",  # the model/adapter repo used for training
    load_in_4bit=True,  # the original passed an undefined `load_in_4bit` variable; True is assumed here
)
# The original loaded the tokenizer from a local "lora_model" directory, which
# does not exist on the Space and is a likely cause of the runtime error;
# loading it from the same repo as the model is assumed here.
tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/BhashiniLLM")
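# The script references `alpaca_prompt` without defining it. The standard
# Alpaca template below is an assumption, chosen because the regex in
# chunk_it() expects a "### Response:" section in the decoded output.
alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.

### Instruction:
{}

### Input:
{}

### Response:
{}"""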
def chunk_it(input_command):
    inputs = tokenizer(
        [
            alpaca_prompt.format(
                '''
You will receive text input that you need to analyze to perform the following tasks:
transaction: Record the details of an item transaction.
last n days transactions: Retrieve transaction records for a specified time period.
view risk inventory: View inventory items based on a risk category.
view inventory: View inventory details.
new items: Add new items to the inventory.
old items: View old items in inventory.
report generation: Generate various inventory reports.
Required Parameters:
Each task requires specific parameters to execute correctly:
transaction:
ItemName (string)
ItemQt (quantity - integer)
Flow (string: "in" or "out")
ShelfNo (string or integer)
last n days transactions:
ItemName (string)
Duration (integer: number of days, default: 30)
view risk inventory:
RiskType (string: "overstock", "understock", or Null for all risk types)
view inventory:
ItemName (string)
ShelfNo (string or integer)
new items:
ItemName (string)
SellingPrice (number)
CostPrice (number)
old items:
ShelfNo (string or integer)
report generation:
ItemName (string)
Duration (integer: number of days, default: 6)
ReportType (string: "profit", "revenue", "inventory", or Null for all reports)
ALWAYS provide output in a JSON format.''',  # instruction
                input_command,  # input
                "",  # output - leave this blank for generation!
            )
        ],
        return_tensors="pt",
    ).to("cuda")

    outputs = model.generate(**inputs, max_new_tokens=216, use_cache=True)
    reply = tokenizer.batch_decode(outputs)

    # Extract the content between "### Response:" and "<|end_of_text|>"
    pattern = r"### Response:\n(.*?)<\|end_of_text\|>"
    match = re.search(pattern, reply[0], re.DOTALL)  # re.DOTALL lets '.' match newlines
    if match is None:
        # Fall back to the raw decode if the expected markers are missing
        return reply[0]
    return match.group(1).strip()
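# Quick smoke test (hypothetical; not part of the original Space code).
# The instruction asks the model for JSON, so a "transaction"-style command
# should yield something shaped like the commented example below. Both the
# sample command and the exact keys are illustrative assumptions drawn from
# the parameter names in the instruction, not observed model output.
# print(chunk_it("10 units of Sugar came in to shelf A3"))
# -> {"ItemName": "Sugar", "ItemQt": 10, "Flow": "in", "ShelfNo": "A3"}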
iface = gr.Interface(
    fn=chunk_it,
    inputs="text",
    outputs="text",
    title="Formatter_Pro",
)
iface.launch(inline=False)
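If the runtime error comes from missing packages rather than the model or tokenizer path, the Space's requirements.txt needs at least the libraries this script relies on (a guessed minimal set based on the imports and the 4-bit loading; pinned versions are not known from the snippet):

torch
transformers
peft
accelerate
bitsandbytes
gradio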