ubiodee committed (verified)
Commit f55bfeb · Parent(s): ae62c1b

Update app.py

Files changed (1): app.py (+31 −14)
app.py CHANGED
@@ -2,28 +2,45 @@ import gradio as gr
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
-# Load the model and tokenizer
-MODEL_NAME = "ubiodee/Cardano_plutus"  # Your fine-tuned model
+# Load model & tokenizer
+MODEL_NAME = "ubiodee/Cardano_plutus"
+
 tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
 model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
+model.eval()
+
+if torch.cuda.is_available():
+    model.to("cuda")
 
-# Function to generate response from the model
+# Response function
 def generate_response(prompt):
-    inputs = tokenizer(prompt, return_tensors="pt")
+    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+
     with torch.no_grad():
-        output = model.generate(**inputs, max_length=512)
-    response = tokenizer.decode(output[0], skip_special_tokens=True)
+        outputs = model.generate(
+            **inputs,
+            max_new_tokens=200,
+            temperature=0.7,
+            top_p=0.9,
+            do_sample=True,
+            eos_token_id=tokenizer.eos_token_id,
+            pad_token_id=tokenizer.pad_token_id,
+        )
+    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+    # Remove the prompt from the output to return only the answer
+    if response.startswith(prompt):
+        response = response[len(prompt):].strip()
+
     return response
 
-# Gradio Interface
-iface = gr.Interface(
+# Gradio UI
+demo = gr.Interface(
     fn=generate_response,
-    inputs=gr.Textbox(label="Enter your prompt"),
+    inputs=gr.Textbox(label="Enter your prompt", lines=4, placeholder="Ask about Plutus..."),
     outputs=gr.Textbox(label="Model Response"),
-    title="Cardano Plutus AI",
-    description="Type in your question or prompt related to Cardano Plutus and get a response from the AI model.",
-    theme="default"
+    title="Cardano Plutus AI Assistant",
+    description="Ask questions about Plutus smart contracts or Cardano blockchain."
 )
 
-# Launch app
-iface.launch()
+demo.launch()
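
Note: many causal-LM tokenizers ship without a dedicated pad token, so the pad_token_id=tokenizer.pad_token_id passed to generate() above may be None. A minimal sketch of a common workaround, assuming the model's EOS token is acceptable as padding (this is not part of the commit):

    # Sketch: reuse the EOS token for padding when none is defined,
    # so generate() receives a concrete pad_token_id.
    if tokenizer.pad_token_id is None:
        tokenizer.pad_token = tokenizer.eos_token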
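
The string-based cleanup (response.startswith(prompt)) can miss when decoding re-spaces or normalizes the prompt text. A hedged alternative sketch, reusing the model and tokenizer globals from app.py, that drops the prompt at the token level before decoding (again, not part of the commit):

    # Sketch: decode only the tokens generated after the prompt,
    # instead of string-matching the decoded prefix.
    def generate_response(prompt):
        inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
        with torch.no_grad():
            outputs = model.generate(
                **inputs,
                max_new_tokens=200,
                temperature=0.7,
                top_p=0.9,
                do_sample=True,
            )
        prompt_len = inputs["input_ids"].shape[1]
        new_tokens = outputs[0][prompt_len:]  # drop the echoed prompt tokens
        return tokenizer.decode(new_tokens, skip_special_tokens=True).strip()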