import torch
import spaces
import gradio as gr
from unsloth import FastLanguageModel

# Load the LoRA-adapted LLaMA-3 8B model in 4-bit for inference.
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "VanguardAI/BhashiniLLaMa3-8B_LoRA_Adapters",
    max_seq_length = 2048,
    dtype = None,
    load_in_4bit = True,
)
FastLanguageModel.for_inference(model)

condition = '''
ALWAYS provide output in a JSON format.
'''

alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.

### Instruction:
{}

### Input:
{}

### Response:
{}"""

@spaces.GPU(duration=300)
def chunk_it(inventory_list, user_input_text):
    # Build the Alpaca-style prompt: instruction (task description + inventory list + JSON condition),
    # the user's input, and an empty response slot for generation.
    inputs = tokenizer(
        [
            alpaca_prompt.format(
                '''
You will receive text input that you need to analyze to perform the following tasks:

transaction: Record the details of an item transaction.
last n days transactions: Retrieve transaction records for a specified time period.
view risk inventory: View inventory items based on a risk category.
view inventory: View inventory details.
new items: Add new items to the inventory.
report generation: Generate various inventory reports.
delete item: Delete an existing item.

Required Parameters:
Each task requires specific parameters to execute correctly:

transaction:
    ItemName (string)
    ItemQt (quantity - integer)
    Type (string: "sale" or "purchase" or "return")
    ReorderPoint (integer)
last n days transactions:
    ItemName (string)
    Duration (integer: number of days; if user input is in weeks, months or years, convert to days)
view risk inventory:
    RiskType (string: "overstock", "understock", or "Null" for all risk types)
view inventory:
    ItemName (string)
new items:
    ItemName (string)
    SellingPrice (number)
    CostPrice (number)
report generation:
    ItemName (string)
    Duration (integer: number of days; if user input is in weeks, months or years, convert to days)
    ReportType (string: "profit", "revenue", "inventory", or "Null" for all reports)

The ItemName must always be matched from the below list of names, EXCEPT for when the Function is "new items".
''' + inventory_list + condition,  # instruction
                user_input_text,               # input
                "",                            # output - leave this blank for generation!
            )
        ],
        return_tensors = "pt",
    ).to("cuda")

    outputs = model.generate(**inputs, max_new_tokens = 216, use_cache = True)
    content = tokenizer.batch_decode(outputs)
    return content

# Input order matches the chunk_it(inventory_list, user_input_text) signature.
iface = gr.Interface(
    fn=chunk_it,
    inputs=[
        gr.Textbox(label="inventory_list", lines=3),
        gr.Textbox(label="user_input_text", lines=3),
    ],
    outputs="text",
    title="Bhashini_LLaMa_LoRA",
)

iface.launch(inline=False)