Spaces: Running on Zero
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load the model and tokenizer
model_name = "Artples/L-MChat-Small"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
# Ensure the model uses CPU
device = torch.device("cpu")
model.to(device)
def chat_with_model(json_input):
    prompt = json_input["prompt"]

    # Tokenize the input prompt
    inputs = tokenizer.encode(prompt, return_tensors="pt")
    inputs = inputs.to(device)

    # Generate a response (greedy decoding, capped at 100 tokens total)
    output = model.generate(inputs, max_length=100, num_return_sequences=1)
    response_text = tokenizer.decode(output[0], skip_special_tokens=True)

    # Mimic the shape of an OpenAI-style completion response
    return {"choices": [{"text": response_text}]}
# Define the expected JSON input (kept for documentation; gr.JSON does not
# validate inputs against a schema)
json_schema = {
    "title": "Request",
    "type": "object",
    "properties": {
        "prompt": {
            "type": "string",
            "description": "Enter your prompt here.",
        }
    },
    "required": ["prompt"],
}
# Create the Gradio interface (gr.JSON replaces the removed gr.inputs.JSON;
# it accepts arbitrary JSON rather than enforcing json_schema above)
iface = gr.Interface(
    fn=chat_with_model,
    inputs=gr.JSON(label="Request"),
    outputs="json",
    title="Chat with L-MChat-Small",
    description="API-like interface using Gradio to simulate OpenAI API behavior.",
)
# Run the Gradio app
iface.launch()
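
# Example client call, as a minimal sketch: it assumes the app is running at
# Gradio's default local URL and that the Interface exposes the default
# "/predict" endpoint.
#
#   from gradio_client import Client
#
#   client = Client("http://127.0.0.1:7860/")
#   result = client.predict({"prompt": "Hello!"}, api_name="/predict")
#   print(result)  # -> {"choices": [{"text": "..."}]}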