Wtzwho committed on
Commit
1a4c5e6
1 Parent(s): a1b6133

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -2
app.py CHANGED
@@ -17,9 +17,11 @@ text_generation_pipeline = pipeline(
17
  # Generate a single chat-style response for `user_input`.
  # Relies on the module-level `tokenizer` and `text_generation_pipeline`
  # (constructed earlier in app.py — not visible in this hunk).
  def generate_text(user_input):
18
  messages = [{"role": "user", "content": user_input}]
19
  # Render the one-turn conversation through the model's chat template;
  # add_generation_prompt=True appends the assistant-turn prefix.
  prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
 
20
  # Sampling decode: temperature/top_k/top_p soften the distribution; capped at 256 new tokens.
  outputs = text_generation_pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
21
  return outputs[0]["generated_text"]
22
 
 
23
  # Create the Gradio interface without the unsupported argument
24
  iface = gr.Interface(
25
  fn=generate_text,
@@ -29,5 +31,6 @@ iface = gr.Interface(
29
  description="A text generation model that understands your queries and generates concise, informative responses."
30
  )
31
 
32
- # Launch the interface
33
- iface.launch() # Removed the enable_queue=True argument
 
 
def generate_text(user_input):
    """Return the model's generated reply to a single user message.

    Wraps *user_input* in a one-turn chat, renders it through the
    tokenizer's chat template, and samples a completion from the
    module-level ``text_generation_pipeline``.
    """
    conversation = [{"role": "user", "content": user_input}]
    # Notice: No `use_auth_token` here. It's only needed when loading the model initially.
    rendered_prompt = tokenizer.apply_chat_template(
        conversation,
        tokenize=False,
        add_generation_prompt=True,
    )
    generations = text_generation_pipeline(
        rendered_prompt,
        max_new_tokens=256,
        do_sample=True,
        temperature=0.7,
        top_k=50,
        top_p=0.95,
    )
    return generations[0]["generated_text"]
23
 
24
+
25
  # Create the Gradio interface without the unsupported argument
26
  iface = gr.Interface(
27
  fn=generate_text,
 
31
  description="A text generation model that understands your queries and generates concise, informative responses."
32
  )
33
 
34
+ # Launch the interface with the share=True parameter to create a public link
35
+ iface.launch(share=True)
36
+