import os

import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
from langchain.chains import LLMChain
from langchain_core.prompts import PromptTemplate
from langchain_huggingface import HuggingFacePipeline  # requires the langchain-huggingface package

# Load the Hugging Face API token from environment variables
hf_token = os.getenv('HF_TOKEN')
if hf_token is None:
    raise ValueError("Hugging Face API token not found in environment variables.")

# Load the tokenizer and model using the API token
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B", use_auth_token=hf_token)
model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-j-6B", use_auth_token=hf_token)
# Define the prompt template
prompt_template = PromptTemplate(
    template="""
You are an AI language model trained to generate code for reinforcement learning models.
Given a description of a trading strategy, you need to generate a prompt that can be used to create code for a reinforcement learning model.
The prompt should be clear, concise, and include all necessary details to implement the strategy in code.
Here is the description of the trading strategy:
"{strategy}"
Based on this description, generate a proper prompt that can be used to create the code for a reinforcement learning model.
The prompt should include the following details:
1. The type of reinforcement learning algorithm to be used (e.g., Q-learning, DQN, PPO, etc.).
2. The main components of the algorithm (e.g., state space, action space, reward function).
3. Any specific libraries or tools that should be used (e.g., TensorFlow, PyTorch, OpenAI Gym).
4. Additional parameters or configurations that are important for the strategy.
Output the prompt in a way that another AI model can use to generate the code.
""",
    input_variables=["strategy"],
)
# Wrap the raw transformers model in a text-generation pipeline so LangChain can call it;
# LLMChain cannot accept an AutoModelForCausalLM directly.
generation_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=256)
llm = HuggingFacePipeline(pipeline=generation_pipeline)
chain = LLMChain(llm=llm, prompt=prompt_template)
# Streamlit UI
st.title("Text to Prompt Generator")
st.write("Enter some text and get a prompt for a reinforcement learning algorithm:")

text_input = st.text_area("Enter text here:")

if st.button("Generate Prompt"):
    if text_input:
        # Format the input into the template and display the resulting prompt
        prompt = prompt_template.format(strategy=text_input)
        st.write("Generated Prompt:")
        st.write(prompt)
    else:
        st.write("Please enter some text to generate a prompt.")