# Install the necessary packages
# pip install accelerate transformers fastapi pydantic torch
from fastapi import FastAPI
from pydantic import BaseModel
from transformers import pipeline

# Load the text generation pipeline once at startup so the gpt2 model is
# not reloaded on every request
text_generator = pipeline("text-generation", model="gpt2")

# Initialize the FastAPI app
app = FastAPI(docs_url="/")
# Define the request model
class RequestModel(BaseModel):
    input: str

# Define a simple status endpoint; the interactive docs already occupy
# the root path ("/"), so this route is served at /health instead
@app.get("/health")
def greet_json():
    return {"message": "working..."}

# Define the text generation endpoint
@app.post("/generatetext")
def get_response(request: RequestModel):
    # Generate up to 50 tokens (prompt included) from the text sent in
    # the request body, returning a single sequence
    generated_texts = text_generator(
        request.input,
        max_length=50,
        num_return_sequences=1,
    )

    # Log each generated sequence
    for i, text in enumerate(generated_texts):
        print(f"Generated Text {i + 1}: {text['generated_text']}")

    # Return the first generated sequence
    return {"generated_text": generated_texts[0]["generated_text"]}

# To run the FastAPI app, use the command: uvicorn <filename>:app --reload
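# Example request against the endpoint above (a sketch, assuming the app is
# served on uvicorn's default host/port of 127.0.0.1:8000):
#   curl -X POST http://127.0.0.1:8000/generatetext \
#        -H "Content-Type: application/json" \
#        -d '{"input": "Once upon a time"}'
# Expected response shape: {"generated_text": "Once upon a time ..."}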