ajeetkumar01 committed on
Commit 8d7fa74 · verified · 1 Parent(s): d1c3444

Update app.py

Files changed (1): app.py +25 -25
app.py CHANGED
@@ -29,24 +29,24 @@ def generate_text(input_text, max_length=16, num_beams=5, do_sample=False, no_re
     generated_text = tokenizer.decode(output[0])
     return generated_text
 
-def generate_text_with_nucleus_search(input_text, max_length=16, do_sample=True, top_p=0.9):
-    """
-    Generate text with nucleus sampling based on the given input text.
-    Parameters:
-    - input_text (str): The input text to start generation from.
-    - max_length (int): Maximum length of the generated text.
-    - do_sample (bool): Whether to use sampling or not.
-    - top_p (float): Nucleus sampling parameter.
-    Returns:
-    - generated_text (str): The generated text.
-    """
-    # Encode the input text and move it to the appropriate device
-    input_ids = tokenizer(input_text, return_tensors='pt')['input_ids']
-    # Generate text using nucleus sampling
-    output = model.generate(input_ids, max_length=max_length, do_sample=do_sample, top_p=top_p)
-    # Decode the generated output
-    generated_text = tokenizer.decode(output[0])
-    return generated_text
+# def generate_text_with_nucleus_search(input_text, max_length=16, do_sample=True, top_p=0.9):
+# """
+# Generate text with nucleus sampling based on the given input text.
+# Parameters:
+# - input_text (str): The input text to start generation from.
+# - max_length (int): Maximum length of the generated text.
+# - do_sample (bool): Whether to use sampling or not.
+# - top_p (float): Nucleus sampling parameter.
+# Returns:
+# - generated_text (str): The generated text.
+# """
+# # Encode the input text and move it to the appropriate device
+# input_ids = tokenizer(input_text, return_tensors='pt')['input_ids']
+# # Generate text using nucleus sampling
+# output = model.generate(input_ids, max_length=max_length, do_sample=do_sample, top_p=top_p)
+# # Decode the generated output
+# generated_text = tokenizer.decode(output[0])
+# return generated_text
 
 # Create Gradio input interface
 input_text_interface = gr.Textbox(lines=5, label="Input Text", placeholder="Enter text for generation...")
@@ -61,14 +61,14 @@ interface1 = gr.Interface(generate_text, input_text_interface, output_text_inter
                           allow_flagging="never")
 
 # Create Gradio output interface for text generation with nucleus sampling
-output_text_interface2 = gr.Textbox(label="Generated Text (Nucleus Sampling)", placeholder="Generated text will appear here...")
+# output_text_interface2 = gr.Textbox(label="Generated Text (Nucleus Sampling)", placeholder="Generated text will appear here...")
 
-# Interface for text generation with nucleus sampling
-interface2 = gr.Interface(generate_text_with_nucleus_search, input_text_interface, output_text_interface2,
-                          title="Text Generation with Nucleus Sampling",
-                          description="Generate text using nucleus sampling with the GPT-2 model.",
-                          allow_flagging="never")
+# # Interface for text generation with nucleus sampling
+# interface2 = gr.Interface(generate_text_with_nucleus_search, input_text_interface, output_text_interface2,
+# title="Text Generation with Nucleus Sampling",
+# description="Generate text using nucleus sampling with the GPT-2 model.",
+# allow_flagging="never")
 
 # Launch both interfaces
 interface1.launch(share=True)
-interface2.launch(share=True)
+# interface2.launch(share=True)
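
For reference, a minimal standalone sketch of the nucleus-sampling (top-p) generation that the commented-out generate_text_with_nucleus_search performed. It assumes the "gpt2" checkpoint and the Hugging Face transformers tokenizer/model referenced by the interface description; the helper name and the example prompt are illustrative, not taken from the repository.

# Hypothetical, self-contained sketch of nucleus (top-p) sampling with GPT-2.
# Assumes the "gpt2" checkpoint from Hugging Face transformers; names are illustrative.
from transformers import GPT2LMHeadModel, GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
model = GPT2LMHeadModel.from_pretrained("gpt2")

def generate_with_top_p(input_text, max_length=16, top_p=0.9):
    # Encode the prompt into token IDs
    input_ids = tokenizer(input_text, return_tensors="pt")["input_ids"]
    # Sample from the smallest token set whose cumulative probability exceeds top_p
    output = model.generate(input_ids, max_length=max_length, do_sample=True, top_p=top_p)
    # Decode the sampled token IDs back into text
    return tokenizer.decode(output[0])

print(generate_with_top_p("Once upon a time"))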