Jonathanmann committed on
Commit
912a893
·
verified ·
1 Parent(s): 9712ef3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -17
app.py CHANGED
@@ -6,24 +6,33 @@ import os
6
# Hugging Face access token (None if the env var is unset; public repos still load).
HF_TOKEN = os.getenv("HF_TOKEN")
model_name = "Jonathanmann/GPT2-medium-SADnov21"

# Load tokenizer and model from Hugging Face.
# NOTE: `token=` replaces the deprecated `use_auth_token=` argument
# (transformers >= 4.33); the authentication behavior is identical.
tokenizer = GPT2Tokenizer.from_pretrained(model_name, token=HF_TOKEN)
model = GPT2LMHeadModel.from_pretrained(model_name, token=HF_TOKEN)

# Define the text generation pipeline
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
 
 
 
15
 
16
# Text-generation callback used by the Gradio interface.
def generate_text(prompt, max_length, temperature, top_k, top_p):
    """Run the pipeline once on *prompt* and return the generated text."""
    sampling_args = {
        "max_length": max_length,
        "temperature": temperature,
        "top_k": top_k,
        "top_p": top_p,
        "num_return_sequences": 1,
    }
    outputs = generator(prompt, **sampling_args)
    return outputs[0]["generated_text"]
 
 
 
27
 
28
  # Create the Gradio interface
29
  demo = gr.Interface(
@@ -40,5 +49,8 @@ demo = gr.Interface(
40
  description="A demo of Jonathanmann/GPT2-medium-SADnov21 with adjustable generation parameters."
41
  )
42
 
43
# Launch the app (starts the Gradio server; blocks until it is stopped)
demo.launch()
 
 
 
 
6
  HF_TOKEN = os.getenv("HF_TOKEN")
7
  model_name = "Jonathanmann/GPT2-medium-SADnov21"
8
 
9
# Load tokenizer and model from Hugging Face with error handling.
# NOTE: `token=` replaces the deprecated `use_auth_token=` argument
# (transformers >= 4.33); the authentication behavior is identical.
try:
    tokenizer = GPT2Tokenizer.from_pretrained(model_name, token=HF_TOKEN)
    model = GPT2LMHeadModel.from_pretrained(model_name, token=HF_TOKEN)
except Exception as e:
    # Chain the original exception (`from e`) so the root-cause traceback
    # is preserved instead of being replaced by the RuntimeError.
    raise RuntimeError(f"Error loading model or tokenizer: {e}") from e

# Define the text generation pipeline with error handling
try:
    generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
except Exception as e:
    raise RuntimeError(f"Error creating text generation pipeline: {e}") from e
21
 
22
# Define a function for generating text with error handling
def generate_text(prompt, max_length, temperature, top_k, top_p):
    """Generate one completion for *prompt* via the module-level pipeline.

    Args:
        prompt: Input text to continue.
        max_length: Maximum total length in tokens (prompt + completion).
        temperature: Sampling temperature; higher values are more random.
        top_k: Keep only the k most likely next tokens at each step.
        top_p: Nucleus-sampling cumulative-probability cutoff.

    Returns:
        The generated text, or a human-readable error message string if
        generation fails (Gradio renders the returned string either way).
    """
    try:
        response = generator(
            prompt,
            # Gradio sliders can deliver floats; these two parameters
            # must be integers for the generation backend.
            max_length=int(max_length),
            temperature=temperature,
            top_k=int(top_k),
            top_p=top_p,
            num_return_sequences=1,
        )
        return response[0]["generated_text"]
    except Exception as e:
        # Best effort: surface the failure in the UI instead of crashing.
        return f"An error occurred during text generation: {str(e)}"
36
 
37
  # Create the Gradio interface
38
  demo = gr.Interface(
 
49
  description="A demo of Jonathanmann/GPT2-medium-SADnov21 with adjustable generation parameters."
50
  )
51
 
52
# Start the Gradio server; report (rather than crash on) launch failures.
try:
    demo.launch()
except Exception as launch_err:
    message = f"An error occurred while launching the Gradio interface: {str(launch_err)}"
    print(message)