Spaces:
Sleeping
Sleeping
prabinpanta0
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -3,9 +3,12 @@ import gradio as gr
|
|
3 |
import torch
|
4 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
5 |
|
|
|
6 |
tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2-medium", clean_up_tokenization_spaces=True)
|
|
|
7 |
model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2-medium")
|
8 |
|
|
|
9 |
system_prompt = """
|
10 |
You are an AI model designed to provide concise information about big data analytics across various fields without mentioning the question. Respond with a focused, one-line answer that captures the essence of the key risk, benefit, or trend associated with the topic.
|
11 |
input: What do you consider the most significant risk of over-reliance on big data analytics in stock market risk management?
|
@@ -22,12 +25,18 @@ output: Potential widening of the achievement gap if data is not used equitably.
|
|
22 |
|
23 |
def generate(text):
|
24 |
try:
|
|
|
25 |
prompt = system_prompt + f"\ninput: {text}\noutput:"
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
|
|
|
|
|
|
|
|
30 |
response_text = tokenizer.decode(outputs[0], skip_special_tokens=True).split("output:")[-1].strip()
|
|
|
31 |
return response_text if response_text else "No valid response generated."
|
32 |
|
33 |
except Exception as e:
|
|
|
3 |
import torch
|
4 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
5 |
|
6 |
+
# Load the model and tokenizer
|
7 |
tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2-medium", clean_up_tokenization_spaces=True)
|
8 |
+
|
9 |
model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2-medium")
|
10 |
|
11 |
+
# Define the initial prompt for the system
|
12 |
system_prompt = """
|
13 |
You are an AI model designed to provide concise information about big data analytics across various fields without mentioning the question. Respond with a focused, one-line answer that captures the essence of the key risk, benefit, or trend associated with the topic.
|
14 |
input: What do you consider the most significant risk of over-reliance on big data analytics in stock market risk management?
|
|
|
25 |
|
26 |
def generate(text):
|
27 |
try:
|
28 |
+
# Combine the system prompt with the user's input
|
29 |
prompt = system_prompt + f"\ninput: {text}\noutput:"
|
30 |
+
|
31 |
+
# Tokenize the input
|
32 |
+
inputs = tokenizer(prompt, return_tensors="pt")
|
33 |
+
|
34 |
+
# Generate the response
|
35 |
+
outputs = model.generate(inputs["input_ids"], max_length=256)
|
36 |
+
|
37 |
+
# Convert the output to text
|
38 |
response_text = tokenizer.decode(outputs[0], skip_special_tokens=True).split("output:")[-1].strip()
|
39 |
+
|
40 |
return response_text if response_text else "No valid response generated."
|
41 |
|
42 |
except Exception as e:
|