"""Streamlit app that asks a Hugging Face-hosted model for Wordle word suggestions."""

import os

import streamlit as st
from huggingface_hub import InferenceClient

# Authenticate against the Hugging Face Inference API using the HF_KEY environment variable.
client = InferenceClient(api_key=os.environ["HF_KEY"])

# Instructions that constrain the model to exactly 5-letter Wordle suggestions.
messages = [
    {
        "role": "user",
        "content": """You are an assistant used for providing Wordle suggestions.
I will give you information on my current Wordle results that will allow you to
suggest the next word for me to guess. You don't need any real-time Wordle data
to suggest words. If you don't have a 5-letter word suggestion, just say that
you don't have one. Do NOT give any word suggestions that are not 5 letters
long. Suggestions cannot be less than or greater than 5 letters; they must be
exactly 5 letters.""",
    }
]

st.write(
    "Please input your current Wordle results. For example, whether you have one "
    "letter correct, two letters correct, three letters correct, or none correct. "
    "This will help provide the best suggestions for the next word to guess."
)

# Avoid shadowing the built-in input() by using a descriptive variable name.
user_input = st.text_area("Wordle information:")

if user_input:
    messages.append({"role": "user", "content": user_input})

    # Stream the model's reply token by token.
    stream = client.chat.completions.create(
        model="HuggingFaceTB/SmolLM2-1.7B-Instruct",
        messages=messages,
        max_tokens=128,
        stream=True,
    )

    # Accumulate the streamed chunks and update a single placeholder as they arrive.
    response = ""
    placeholder = st.empty()
    for chunk in stream:
        # delta.content can be None on some chunks, so fall back to an empty string.
        response += chunk.choices[0].delta.content or ""
        placeholder.write(response)