Spaces:
Sleeping
Sleeping
File size: 2,450 Bytes
20fefb1 e474d15 20fefb1 b232c7a 20fefb1 92d45bc d312778 086dd5a 2754cd5 50f573d 2754cd5 92d45bc 2754cd5 92d45bc 2754cd5 20fefb1 39a4d43 20fefb1 50f573d 20fefb1 39a4d43 20fefb1 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 |
import streamlit as st
from transformers import pipeline
@st.cache_resource
def load_model():
    """Build and cache the pbjs_gpt2 text-generation pipeline.

    ``st.cache_resource`` ensures the model is loaded once per server
    process and shared across reruns/sessions rather than reloaded on
    every widget interaction.
    """
    generation_pipeline = pipeline("text-generation", model="PeterBrendan/pbjs_gpt2")
    return generation_pipeline
def main():
    """Render the Prebid Config Generator Streamlit page.

    Shows usage notes, lets the user pick a default prompt or type a
    custom one, then runs the cached pbjs_gpt2 pipeline on the chosen
    prompt and displays the generated Prebid config text.
    """
    # Reserved slot for widget-state bookkeeping across reruns.
    if "generated_widget_id" not in st.session_state:
        st.session_state["generated_widget_id"] = None

    st.title("Prebid Config Generator")
    st.write("Enter a Prebid config setting, such as 'bidderTimeout', and get a generated Prebid config output starting from that setting onward. Using '{' will generate a Prebid config from the beginning.")
    st.subheader("Intended Uses")
    st.write("This model is designed to assist publishers in understanding and exploring how other publishers configure their Prebid settings. It can serve as a valuable reference to gain insights into common configurations, best practices, and different approaches used by publishers across various domains.")
    st.write("To learn more about the model, visit the [pbjs_gpt2 model page](https://huggingface.co/PeterBrendan/pbjs_gpt2). You can also refer to the [official Prebid Documentation on pbjs.setConfig](https://docs.prebid.org/dev-docs/publisher-api-reference/setConfig.html) for more information.")
    st.write("Note: The model may take some time to generate the output.")

    # Default prompts. FIX: "Usebidcache" mis-cased the real Prebid setting
    # name; the documented option is "useBidCache" (see linked setConfig docs).
    default_prompts = ["{", "bidderTimeout", "bidderSequence", "useBidCache", "customPriceBucket"]

    # Create a selectbox for default prompts
    default_prompt = st.selectbox("Choose a default prompt:", default_prompts)

    # Create a text input field for custom prompt
    custom_prompt = st.text_input("Enter a custom prompt:", "")

    # BUG FIX: the original tested `if default_prompt:` first, but
    # st.selectbox always returns a (truthy) selection, so the custom
    # prompt could never be used. A non-empty custom prompt now wins;
    # otherwise we fall back to the selected default.
    user_input = custom_prompt if custom_prompt else default_prompt

    if user_input:
        # Load the (cached) Hugging Face model
        generator = load_model()

        # Show a transient status message while generating
        output_placeholder = st.empty()
        with output_placeholder:
            st.write("Generating Output...")

        # Generate text based on the chosen prompt
        generated_text = generator(user_input, max_length=700, num_return_sequences=1)[0]["generated_text"]

        # Clear 'Generating Output' message and display the generated text
        output_placeholder.empty()
        st.write("Generated Text:")
        st.write(generated_text)
# Script entry point: launch the Streamlit app when run directly
# (e.g. `streamlit run app.py`), not when imported as a module.
if __name__ == "__main__":
    main()
|