savan360 committed
Commit 93d7a55 · verified · 1 Parent(s): 284bd7a

Update app.py

Files changed (1)
  1. app.py +12 -17
app.py CHANGED
@@ -1,28 +1,23 @@
 import gradio as gr
 from transformers import pipeline
 
-# Use a different language model: GPT-Neo (125M)
-generator = pipeline('text-generation', model='EleutherAI/gpt-neo-125M')
+# Load a question-answering model instead of a text generator
+qa_pipeline = pipeline("question-answering", model="deepset/roberta-base-squad2")
 
-def generate_text(prompt):
-    # Adjust parameters to reduce repetition and shorten output
-    generated = generator(
-        prompt,
-        max_length=30,           # Limit the output length
-        do_sample=True,          # Enable sampling for varied responses
-        temperature=0.2,         # Lower temperature for less randomness
-        repetition_penalty=1.5,  # Penalize repetitive tokens
-        no_repeat_ngram_size=2   # Prevent repeating 2-word sequences
-    )
-    return generated[0]['generated_text']
+def get_answer(question):
+    context = """
+    London is the capital of the United Kingdom. The UK consists of England, Scotland, Wales, and Northern Ireland.
+    """
+    answer = qa_pipeline(question=question, context=context)
+    return answer["answer"]
 
-# Create the Gradio interface
+# Create Gradio Interface
 iface = gr.Interface(
-    fn=generate_text,
+    fn=get_answer,
     inputs="text",
     outputs="text",
-    title="Simple LLM with GPT-Neo & Gradio",
-    description="Enter a prompt and get a concise response using GPT-Neo."
+    title="Ask Any Question",
+    description="Ask factual questions and get precise answers."
 )
 
 iface.launch()
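
For reference, a minimal sketch of how the new question-answering pipeline behaves when called directly, outside the Gradio interface. The question string and the expected answer in the comments are illustrative assumptions, not part of the commit; it only assumes the deepset/roberta-base-squad2 weights can be downloaded locally.

# Minimal sketch: call the QA pipeline from the new app.py directly.
from transformers import pipeline

qa_pipeline = pipeline("question-answering", model="deepset/roberta-base-squad2")

context = (
    "London is the capital of the United Kingdom. "
    "The UK consists of England, Scotland, Wales, and Northern Ireland."
)

# Illustrative question; the model is expected to extract the span "London".
result = qa_pipeline(question="What is the capital of the UK?", context=context)
print(result["answer"])  # extracted answer span
print(result["score"])   # model confidence for that span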