anto18671 committed on
Commit
2c7a1e2
·
verified ·
1 Parent(s): baee762

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -23
app.py CHANGED
@@ -1,26 +1,13 @@
1
- from flask import Flask, request, jsonify
2
  from transformers import AutoModelForCausalLM, AutoTokenizer
3
- import torch
4
 
5
  # Initialize the model and tokenizer
6
  model = AutoModelForCausalLM.from_pretrained("anto18671/lumenspark", trust_remote_code=True)
7
  tokenizer = AutoTokenizer.from_pretrained("anto18671/lumenspark", trust_remote_code=True)
8
 
9
- # Set up Flask application
10
- app = Flask(__name__)
11
-
12
- # Define inference endpoint
13
- @app.route("/generate", methods=["POST"])
14
- def generate_text():
15
- data = request.get_json()
16
-
17
- # Extract the input text
18
- text = data.get("text", "")
19
- if not text:
20
- return jsonify({"error": "Input text is required"}), 400
21
-
22
  # Tokenize input text
23
- encoded_input = tokenizer(text, return_tensors='pt')
24
 
25
  # Generate text using the model
26
  output = model.generate(
@@ -37,10 +24,16 @@ def generate_text():
37
 
38
  # Decode the generated text
39
  decoded_text = tokenizer.decode(output[0], skip_special_tokens=True)
40
-
41
- # Return generated text as JSON response
42
- return jsonify({"generated_text": decoded_text})
43
-
44
- # Run the Flask app
45
- if __name__ == "__main__":
46
- app.run(host="0.0.0.0", port=5000)
 
 
 
 
 
 
 
 
1
  from transformers import AutoModelForCausalLM, AutoTokenizer
2
+ import gradio as gr
3
 
4
  # Initialize the model and tokenizer
5
  model = AutoModelForCausalLM.from_pretrained("anto18671/lumenspark", trust_remote_code=True)
6
  tokenizer = AutoTokenizer.from_pretrained("anto18671/lumenspark", trust_remote_code=True)
7
 
8
+ def generate_text(input_text):
 
 
 
 
 
 
 
 
 
 
 
 
9
  # Tokenize input text
10
+ encoded_input = tokenizer(input_text, return_tensors='pt')
11
 
12
  # Generate text using the model
13
  output = model.generate(
 
24
 
25
  # Decode the generated text
26
  decoded_text = tokenizer.decode(output[0], skip_special_tokens=True)
27
+ return decoded_text
28
+
29
+ # Set up Gradio interface
30
+ interface = gr.Interface(
31
+ fn=generate_text,
32
+ inputs=gr.Textbox(lines=2, placeholder="Enter your text here..."),
33
+ outputs="text",
34
+ title="Text Generator",
35
+ description="Generate text using the Lumenspark model."
36
+ )
37
+
38
+ # Launch the interface
39
+ interface.launch()