File size: 1,259 Bytes
0429aca
 
9413ff7
0429aca
9413ff7
 
 
 
0429aca
 
9413ff7
0429aca
9413ff7
 
0429aca
9413ff7
0429aca
 
9413ff7
d76ea0c
9413ff7
 
 
 
 
 
d76ea0c
9413ff7
0429aca
d76ea0c
9413ff7
 
0429aca
9413ff7
d76ea0c
9413ff7
0429aca
9413ff7
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
import os
import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the fine-tuned model and tokenizer once at app startup.
# NOTE(review): placeholder path — replace with the real checkpoint before deploying.
model_path = "path/to/your/fine-tuned-model"
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(model_path)

# Streamlit app layout
st.title("๐Ÿค– Fine-tuned Arabic Mistral Model ๐Ÿง™")

# Input text area for user query
user_query = st.text_area("โœจ Enter your query in Arabic:", height=100)

# Sliders for temperature and max length.
# BUG FIX: both names were used in model.generate() below but never defined,
# so pressing the button raised NameError at runtime.
temperature = st.slider("Temperature", min_value=0.1, max_value=2.0, value=0.7, step=0.1)
max_length = st.slider("Max length", min_value=16, max_value=1024, value=256, step=16)

# Button to trigger the query
if st.button("๐Ÿช„ Generate Response"):
    if user_query:
        # Tokenize input and generate a response.
        inputs = tokenizer(user_query, return_tensors="pt")
        outputs = model.generate(
            inputs.input_ids,
            attention_mask=inputs.attention_mask,  # silence pad-token warning, correct masking
            max_length=max_length,
            do_sample=True,  # temperature is ignored under the default greedy decoding
            temperature=temperature,
        )
        response = tokenizer.decode(outputs[0], skip_special_tokens=True)

        # Display the response
        st.markdown("๐Ÿ”ฎ Response from Fine-tuned Arabic Model:")
        st.write(response)

        # Save query and response to session state (as in your original code)
    else:
        st.write("๐Ÿšจ Please enter a query.")

# History display and clear button (as in your original code)