eaglelandsonce committed on
Commit 389f6b6 · verified · 1 Parent(s): b4ef92f

Update app.py

Files changed (1)
  1. app.py +27 -52
app.py CHANGED
@@ -1,61 +1,36 @@
+ # Import necessary libraries
  import streamlit as st
- from huggingface_hub import InferenceClient
+ from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
 
- """
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
- """
- client = InferenceClient()
+ # Load the model and tokenizer
+ model_name = "mistralai/Codestral-22B-v0.1"
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+ model = AutoModelForCausalLM.from_pretrained(model_name)
 
- def respond(message, history, system_message, max_tokens, temperature, top_p):
-     messages = [{"role": "system", "content": system_message}]
-     for user_msg, assistant_msg in history:
-         if user_msg:
-             messages.append({"role": "user", "content": user_msg})
-         if assistant_msg:
-             messages.append({"role": "assistant", "content": assistant_msg})
-     messages.append({"role": "user", "content": message})
-
-     response = ""
-     try:
-         for message in client.chat_completion(
-             model="mistralai/Codestral-22B-v0.1",
-             messages=messages,
-             max_tokens=max_tokens,
-             stream=True,
-             temperature=temperature,
-             top_p=top_p,
-         ):
-             token = message.choices[0].delta.content
-             response += token
-             yield response
-     except Exception as e:
-         yield f"Error: {e}"
+ # Initialize the pipeline
+ text_generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
 
  # Streamlit interface
- st.title("Chat with Codestral Model")
- system_message = st.text_input("System message", value="You are an expert python coder with in-depth knowledge of langchain.")
- max_tokens = st.slider("Max new tokens", min_value=1, max_value=2048, value=2048, step=1)
- temperature = st.slider("Temperature", min_value=0.0, max_value=1.0, value=0.6, step=0.1)
- top_p = st.slider("Top-p (nucleus sampling)", min_value=0.1, max_value=1.0, value=0.95, step=0.05)
-
- # Initialize history in session state
- if "history" not in st.session_state:
-     st.session_state.history = []
+ st.title("Codestral Text Generation")
 
- def get_response():
-     user_input = st.session_state.user_input
-     if user_input:
-         st.session_state.history.append((user_input, ""))
-         response_generator = respond(user_input, st.session_state.history, system_message, max_tokens, temperature, top_p)
-         response = ""
-         for r in response_generator:
-             response = r
-         st.session_state.history[-1] = (user_input, response)
-         st.session_state.user_input = ""  # Clear input after sending
-
- # Display chat history
- st.text_area("Chat History", value="\n".join([f"User: {h[0]}\nAssistant: {h[1]}" for h in st.session_state.history]), height=300, key="chat_history")
+ st.write("""
+ This is a text generation application using the Codestral model from Mistral AI.
+ Enter your prompt below and generate text.
+ """)
 
  # User input
- st.text_input("Your message:", key="user_input", on_change=get_response)
+ user_input = st.text_area("Enter your prompt here:", "")
 
+ if st.button("Generate"):
+     if user_input:
+         with st.spinner("Generating text..."):
+             # Generate text using the model
+             generated_text = text_generator(user_input, max_length=100, num_return_sequences=1)
+             st.write("### Generated Text")
+             st.write(generated_text[0]['generated_text'])
+     else:
+         st.warning("Please enter a prompt to generate text.")
+
+ # Streamlit has no st.run(); launch the app from a terminal with: streamlit run app.py
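
For reference, the pipeline introduced in the new app.py can be sanity-checked outside Streamlit before deploying the Space. The snippet below is a minimal standalone sketch, assuming the mistralai/Codestral-22B-v0.1 checkpoint is accessible; the torch_dtype and device_map arguments are illustrative additions, not part of the committed code, included only to keep a 22B-parameter model within memory.

# Standalone sketch of the text-generation pipeline used in the updated app.py.
# Assumption: access to mistralai/Codestral-22B-v0.1; torch_dtype/device_map are
# illustrative options, not part of the committed code.
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

model_name = "mistralai/Codestral-22B-v0.1"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,  # halve memory use for the large model
    device_map="auto",          # spread layers across available devices
)

text_generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
result = text_generator(
    "Write a Python function that reverses a string.",
    max_length=100,
    num_return_sequences=1,
)
print(result[0]["generated_text"])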