Arnesh27 committed
Commit ab3d718 · verified · Parent: 070f660

Update app.py

Files changed (1): app.py (+13 -19)
app.py CHANGED
@@ -1,24 +1,18 @@
 import gradio as gr
+import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer

-# Load model and tokenizer
-model = AutoModelForCausalLM.from_pretrained("HuggingFaceH4/starchat2-15b-v0.1")
-tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/starchat2-15b-v0.1")
+# Load a smaller model or in half-precision
+model = AutoModelForCausalLM.from_pretrained("distilgpt2", torch_dtype=torch.float16)
+tokenizer = AutoTokenizer.from_pretrained("distilgpt2")

-# Function for inference
-def generate_text(input_text):
-    inputs = tokenizer(input_text, return_tensors="pt")
-    outputs = model.generate(**inputs)
-    return tokenizer.decode(outputs[0], skip_special_tokens=True)
+def generate_text(inputs):
+    responses = []
+    for input_text in inputs:
+        input_tensor = tokenizer(input_text, return_tensors="pt")
+        output = model.generate(**input_tensor)
+        responses.append(tokenizer.decode(output[0], skip_special_tokens=True))
+    return responses

-# Gradio interface
-iface = gr.Interface(
-    fn=generate_text,
-    inputs="text",
-    outputs="text",
-    title="Project Build",
-    description="Generate text using the StarChat model."
-)
-
-# Launch the app
-iface.launch()
+iface = gr.Interface(fn=generate_text, inputs="text", outputs="text", allow_flagging="never")
+iface.launch()
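
One caveat in the committed version: with inputs="text", Gradio passes generate_text a single string, so `for input_text in inputs` iterates over individual characters rather than over prompts. A minimal single-prompt sketch under the same distilgpt2 setup (the max_new_tokens cap is an assumption, not part of the commit):

import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Same checkpoint and dtype as the commit; float16 halves memory use.
model = AutoModelForCausalLM.from_pretrained("distilgpt2", torch_dtype=torch.float16)
tokenizer = AutoTokenizer.from_pretrained("distilgpt2")

def generate_text(input_text):
    # Gradio's "text" input delivers one string, not a list of prompts.
    inputs = tokenizer(input_text, return_tensors="pt")
    # max_new_tokens is an assumed cap, not taken from the commit.
    output = model.generate(**inputs, max_new_tokens=64)
    return tokenizer.decode(output[0], skip_special_tokens=True)

iface = gr.Interface(fn=generate_text, inputs="text", outputs="text", allow_flagging="never")
iface.launch()

Alternatively, the loop-over-a-list shape matches Gradio's batching mode: passing batch=True (optionally with max_batch_size) to gr.Interface makes Gradio hand fn a list of inputs and expect a list of outputs, in which case the committed function works as written.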