Update space
app.py CHANGED
@@ -7,7 +7,7 @@ For more information on `huggingface_hub` Inference API support, please check th
 # client = InferenceClient("karanzrk/bert-Causal-QA")
 
 from transformers import pipeline
-generator = pipeline('text-generation', model = 'karanzrk/bert-Causal-QA', tokenizer="bert-base-uncased")
+generator = pipeline('text-generation', model = 'karanzrk/bert-Causal-QA', tokenizer="bert-base-uncased", max_length = 128)
 # generator("Hello, I'm a language model", max_length = 30, num_return_sequences=3)
 
 
@@ -64,6 +64,7 @@ def inference(text):
     # classifier = pipeline("text-classification", model="karanzrk/essayl0")
     text = "Question: " + text
     output = generator(text)
+    answer = output["generated_text"]
     return output
 
 # launcher = gr.Interface(
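One caveat worth flagging on the second hunk: a transformers text-generation pipeline returns a list of dicts (e.g. [{"generated_text": "..."}]), so output["generated_text"] as written would raise a TypeError, and the function still returns output rather than the new answer. A minimal sketch of how inference could look with those two points addressed (names and the max_length value follow the diff; this is an illustration, not the committed code):

from transformers import pipeline

# Build the generator as in the diff; max_length caps the total length
# of the generated sequence.
generator = pipeline(
    'text-generation',
    model='karanzrk/bert-Causal-QA',
    tokenizer='bert-base-uncased',
    max_length=128,
)

def inference(text):
    text = "Question: " + text
    # The pipeline returns a list with one dict per generated sequence,
    # so index the first result before reading "generated_text".
    output = generator(text)
    answer = output[0]["generated_text"]
    return answer

Called as inference("What causes rain?"), this returns the generated answer string rather than the raw pipeline output.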