Update app.py
Browse files
app.py
CHANGED
@@ -1,13 +1,13 @@
|
|
1 |
import gradio as gr
# FIX: the import line was truncated ("from transformers import") — bring in the
# three names this module actually uses.
from transformers import AutoTokenizer, AutoModelForCausalLM, TextGenerationPipeline

# Load the model and tokenizer. The checkpoint ships custom modeling code,
# hence trust_remote_code=True; ignore_mismatched_sizes tolerates head-size
# differences between checkpoint and config.
model = AutoModelForCausalLM.from_pretrained('cbauer/groupchatGPT', trust_remote_code=True, ignore_mismatched_sizes=True)
tokenizer = AutoTokenizer.from_pretrained('cbauer/groupchatGPT')

# Build the generation pipeline once at import time and share it app-wide.
# FIX: dropped the later "generator = pipeline('text-generation', ...)" line —
# `pipeline` was never imported (NameError) and the reassignment clobbered this
# working TextGenerationPipeline with one lacking the loaded tokenizer.
generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
|
10 |
-
|
11 |
def generate(text):
    """Feed *text* to the shared text-generation pipeline and return the
    generated string (prompt plus continuation) of the first candidate."""
    outputs = generator(text, max_length=30, num_return_sequences=1)
    first = outputs[0]
    return first["generated_text"]
|
@@ -19,9 +19,9 @@ examples = [
|
|
19 |
|
20 |
# Wire the generator into a simple Gradio UI.
# FIX: the inputs/outputs arguments were truncated ("inputs=gr." / "outputs=gr.",
# a syntax error) — completed with Textbox components matching the repo's own
# corrected revision.
demo = gr.Interface(
    fn=generate,
    inputs=gr.components.Textbox(lines=5, label="Input Text"),
    outputs=gr.components.Textbox(label="Generated Text"),
    examples=examples
)

# Start the Gradio server (blocks until the app is stopped).
demo.launch()
|
|
|
1 |
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, TextGenerationPipeline

# The checkpoint bundles custom modeling code, so remote code must be trusted;
# ignore_mismatched_sizes tolerates head-size differences in the checkpoint.
_checkpoint = 'cbauer/groupchatGPT'
tokenizer = AutoTokenizer.from_pretrained(_checkpoint)
model = AutoModelForCausalLM.from_pretrained(_checkpoint, trust_remote_code=True, ignore_mismatched_sizes=True)

# One shared text-generation pipeline for the whole app.
generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
|
10 |
|
|
|
|
|
11 |
def generate(text, max_length=30, num_return_sequences=1):
    """Generate a continuation of *text* with the module-level pipeline.

    Args:
        text: Prompt string fed to the text-generation pipeline.
        max_length: Maximum total token length (prompt + continuation).
            Defaults to 30, preserving the original hard-coded behavior.
        num_return_sequences: Number of candidate sequences to generate;
            only the first is returned. Defaults to 1 as before.

    Returns:
        The ``generated_text`` field of the first returned sequence.
    """
    # Generalized: the former magic numbers are now overridable keyword
    # arguments; single-argument callers (the Gradio fn) are unaffected.
    result = generator(text, max_length=max_length, num_return_sequences=num_return_sequences)
    return result[0]["generated_text"]
|
|
|
19 |
|
20 |
# Build the UI components first, then assemble the Interface around the
# module-level generate() function and example prompts.
input_box = gr.components.Textbox(lines=5, label="Input Text")
output_box = gr.components.Textbox(label="Generated Text")

demo = gr.Interface(
    fn=generate,
    inputs=input_box,
    outputs=output_box,
    examples=examples,
)

# Launch the Gradio server (blocks until the app is stopped).
demo.launch()
|