WilliamGazeley committed
Commit fd872b2 · verified · 1 Parent(s): de1c7b8

Implement simple comparison UI

Files changed (1)
  1. app.py +39 -16
app.py CHANGED
@@ -1,24 +1,47 @@
 import streamlit as st
-from transformers import AutoModelForCausalLM, AutoTokenizer
-
-# Load the model and tokenizer
-model_name = "InvestmentResearchAI/LLM-ADE_tiny-v0.001"
-tokenizer = AutoTokenizer.from_pretrained(model_name)
-model = AutoModelForCausalLM.from_pretrained(model_name)
+from transformers import pipeline
+from concurrent.futures import ProcessPoolExecutor
+
+prompt_template = (
+    "<|system|>\n"
+    "You are a friendly chatbot who always gives helpful, detailed, and polite answers.</s>\n"
+    "<|user|>\n"
+    "{input_text}</s>\n"
+    "<|assistant|>\n"
+)
+
+def generate_base_response(input_text):
+    base_pipe = pipeline("text-generation", model="TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T", max_length=512)
+    return base_pipe(input_text)[0]["generated_text"]
+
+def generate_irai_response(input_text):
+    irai_pipe = pipeline("text-generation", model="InvestmentResearchAI/LLM-ADE_tiny-v0.001", max_length=512)
+    return irai_pipe(prompt_template.format(input_text=input_text))[0]["generated_text"].split("<|assistant|>")[1].strip()
 
 def generate_response(input_text):
-    """Generate response from the model based on the input text."""
-    inputs = tokenizer(input_text, return_tensors="pt", padding=True, truncation=True, max_length=512)
-    output = model.generate(**inputs, max_length=512, num_return_sequences=1)
-    response = tokenizer.decode(output[0], skip_special_tokens=True)
-    return response
+    with ProcessPoolExecutor() as executor:
+        try:
+            future_base = executor.submit(generate_base_response, input_text)
+            future_irai = executor.submit(generate_irai_response, input_text)
+            base_resp = future_base.result()
+            irai_resp = future_irai.result()
+        except Exception as e:
+            st.error(f"An error occurred: {e}")
+            return None, None
+    return base_resp, irai_resp
 
-# Streamlit interface
-st.title("IRAI LLM-ADE Model")
-user_input = st.text_area("Enter your text here:", "")
+st.title("IRAI LLM-ADE Model vs Base Model")
+user_input = st.text_area("Enter a financial question:", "")
 if st.button("Generate"):
     if user_input:
-        response = generate_response(user_input)
-        st.text_area("Model Response:", response, height=300)
+        base_response, irai_response = generate_response(user_input)
+        col1, col2 = st.columns(2)  # Updated to use `st.columns`
+        with col1:
+            st.header("Base Model Response")
+            st.text_area("", base_response, height=300)
+        with col2:
+            st.header("IRAI LLM-ADE Model Response")
+            st.text_area("", irai_response, height=300)
     else:
         st.warning("Please enter some text to generate a response.")
+
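
For reference, the core of the change is the fan-out in generate_response: both model calls are submitted to separate worker processes, so the base-model and IRAI generations run concurrently rather than back to back. A minimal standalone sketch of that pattern (hypothetical, not part of the commit; slow_task stands in for the two pipeline-backed generator functions):

from concurrent.futures import ProcessPoolExecutor

def slow_task(name):
    # Stand-in for a pipeline() call that loads a model and generates text.
    return f"response from {name}"

if __name__ == "__main__":
    with ProcessPoolExecutor() as executor:
        # Submit both tasks before collecting results so they overlap.
        future_base = executor.submit(slow_task, "base model")
        future_irai = executor.submit(slow_task, "IRAI model")
        print(future_base.result())
        print(future_irai.result())

Note that because each pipeline is constructed inside its worker function, both models are re-loaded on every button press; that keeps the worker processes self-contained, at the cost of per-request latency. The app itself is launched as usual with streamlit run app.py.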