prabinpanta0 committed on
Commit ea6f382 · verified · 1 Parent(s): 6f76bc8

Update app.py

Files changed (1)
  1. app.py +49 -39
app.py CHANGED
@@ -1,46 +1,56 @@
 import os
-# import json
-# import gradio as gr
-# from transformers import AutoTokenizer, AutoModelForCausalLM
+import gradio as gr
+from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
+from huggingface_hub import login

-# tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b-it")
-# model = AutoModelForCausalLM.from_pretrained("google/gemma-7b-it")
+# Get the Hugging Face token from environment variables
 HF_TOKEN = os.getenv('HF')
-print(HF_TOKEN)
-# def generate(text):
-#     try:
-#         # Tokenize the input text
-#         inputs = tokenizer(text, return_tensors="pt", max_length=512, truncation=True)
+
+if not HF_TOKEN:
+    raise ValueError("The HF environment variable is not set. Please set it to your Hugging Face token.")
+
+# Authenticate with Hugging Face
+login(HF_TOKEN)
+
+# Load the model and tokenizer using the Hugging Face token
+model_name = "google/gemma-7b-it"
+tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=True)
+model = AutoModelForSeq2SeqLM.from_pretrained(model_name, use_auth_token=True)
+
+def generate(text):
+    try:
+        # Tokenize the input text
+        inputs = tokenizer(text, return_tensors="pt", max_length=512, truncation=True)

-#         # Generate the response
-#         outputs = model.generate(
-#             inputs["input_ids"],
-#             max_length=1024,
-#             num_beams=5,
-#             early_stopping=True,
-#         )
+        # Generate the response
+        outputs = model.generate(
+            inputs["input_ids"],
+            max_length=1024,
+            num_beams=5,
+            early_stopping=True,
+        )

-#         # Decode the output tokens to text
-#         response_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
+        # Decode the output tokens to text
+        response_text = tokenizer.decode(outputs[0], skip_special_tokens=True)

-#         return response_text if response_text else "No valid response generated."
+        return response_text if response_text else "No valid response generated."

-#     except Exception as e:
-#         return str(e)
-
-# iface = gr.Interface(
-#     fn=generate,
-#     inputs=gr.Textbox(lines=2, placeholder="Enter text here..."),
-#     outputs="text",
-#     title="Chuunibyou Text Generator",
-#     description="Transform text into an elaborate and formal style with a nobleman tone.",
-#     live=False
-# )
-
-# def launch_custom_interface():
-#     iface.launch()
-#     with gr.TabbedInterface(fn=generate, inputs=gr.Textbox(lines=2, placeholder="Enter text here..."), outputs=gr.HTML(label="Output")) as ti:
-#         ti.add(custom_html)
-
-# if __name__ == "__main__":
-#     launch_custom_interface()
+    except Exception as e:
+        return str(e)
+
+iface = gr.Interface(
+    fn=generate,
+    inputs=gr.Textbox(lines=2, placeholder="Enter text here..."),
+    outputs="text",
+    title="Chuunibyou Text Generator",
+    description="Transform text into an elaborate and formal style with a nobleman tone.",
+    live=False
+)
+
+def launch_custom_interface():
+    iface.launch()
+    with gr.TabbedInterface(fn=generate, inputs=gr.Textbox(lines=2, placeholder="Enter text here..."), outputs=gr.HTML(label="Output")) as ti:
+        ti.add(custom_html)
+
+if __name__ == "__main__":
+    launch_custom_interface()
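
A few issues in the added version are worth noting: google/gemma-7b-it is a decoder-only model, so AutoModelForSeq2SeqLM.from_pretrained will not load it and AutoModelForCausalLM (as in the commented-out code being removed) is the matching auto class; use_auth_token=True is deprecated in newer transformers releases in favour of the token argument; and the code after iface.launch() in launch_custom_interface never runs while the app is serving (launch() blocks) and references an undefined custom_html. Below is a minimal corrected sketch, not the committed code: it keeps the same model, token handling, and Gradio interface, but swaps in AutoModelForCausalLM, passes the token explicitly, trims the prompt from the decoded output, and drops the unreachable tab code; the max_new_tokens value is an assumed replacement for the original max_length=1024.

import os

import gradio as gr
from huggingface_hub import login
from transformers import AutoModelForCausalLM, AutoTokenizer

# Read the Hugging Face token from the environment and authenticate.
HF_TOKEN = os.getenv("HF")
if not HF_TOKEN:
    raise ValueError("The HF environment variable is not set. Please set it to your Hugging Face token.")
login(HF_TOKEN)

# gemma-7b-it is a decoder-only model, so the causal-LM auto class is the one that matches it.
model_name = "google/gemma-7b-it"
tokenizer = AutoTokenizer.from_pretrained(model_name, token=HF_TOKEN)
model = AutoModelForCausalLM.from_pretrained(model_name, token=HF_TOKEN)

def generate(text):
    try:
        # Tokenize the prompt, truncating overly long inputs.
        inputs = tokenizer(text, return_tensors="pt", max_length=512, truncation=True)

        # Generate a continuation with beam search; max_new_tokens bounds the reply length
        # independently of the prompt length (assumed value, not from the original commit).
        outputs = model.generate(
            inputs["input_ids"],
            max_new_tokens=512,
            num_beams=5,
            early_stopping=True,
        )

        # Decode only the newly generated tokens so the prompt is not echoed back.
        response_text = tokenizer.decode(
            outputs[0][inputs["input_ids"].shape[-1]:],
            skip_special_tokens=True,
        )
        return response_text if response_text else "No valid response generated."
    except Exception as e:
        return str(e)

iface = gr.Interface(
    fn=generate,
    inputs=gr.Textbox(lines=2, placeholder="Enter text here..."),
    outputs="text",
    title="Chuunibyou Text Generator",
    description="Transform text into an elaborate and formal style with a nobleman tone.",
    live=False,
)

if __name__ == "__main__":
    # launch() blocks, so any code placed after it would only run once the server stops.
    iface.launch()

If a tabbed layout is actually wanted, gr.TabbedInterface takes a list of interfaces plus tab names (for example gr.TabbedInterface([iface], ["Generate"])) rather than fn/inputs/outputs keyword arguments.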