Vineedhar committed on
Commit
f147104
·
verified ·
1 Parent(s): 0c1acec

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -8
app.py CHANGED
@@ -1,6 +1,5 @@
1
  import streamlit as st
2
- from langchain_ollama import OllamaLLM
3
- from transformers import AutoTokenizer, AutoModelForCausalLM
4
 
5
  def main():
6
  # Set up the page
@@ -20,19 +19,19 @@ def main():
20
  # Input for user-provided data
21
  prompt = st.text_area("Enter the prompt here:")
22
 
23
- # Initialize the models
24
- gemma_model = OllamaLLM(model='gemma:2b')
25
  tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b-it")
26
  model = AutoModelForCausalLM.from_pretrained("google/gemma-2-2b-it")
27
 
 
 
28
  # Button to generate the nudge
29
  if st.button("Generate Nudge"):
30
- if S_boss.strip():
31
  with st.spinner("Generating nudges..."):
32
- # Generate the response using Ollama LLM
33
- response = gemma_model.invoke(input=f"I want you to analyze the {prompt}. Which contains top 3 strengths or weaknesses of a person being assessed. You will generate nudges for improving upon these strengths or fixing upon these weaknesses. If you don't find any data, just respond as - No data available.")
34
  st.success("Nudges generated successfully!")
35
- st.text_area("Generated Nudges:", response, height=200)
36
  else:
37
  st.warning("Please enter data to generate nudges.")
38
 
 
1
  import streamlit as st
2
+ from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
 
3
 
4
  def main():
5
  # Set up the page
 
19
  # Input for user-provided data
20
  prompt = st.text_area("Enter the prompt here:")
21
 
22
+ # Load model directly
 
23
  tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b-it")
24
  model = AutoModelForCausalLM.from_pretrained("google/gemma-2-2b-it")
25
 
26
+ pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
27
+
28
  # Button to generate the nudge
29
  if st.button("Generate Nudge"):
30
+ if prompt.strip():
31
  with st.spinner("Generating nudges..."):
32
+ response = pipe(messages)
 
33
  st.success("Nudges generated successfully!")
34
+ st.text_area("Pipeline Response:", str(response), height=200)
35
  else:
36
  st.warning("Please enter data to generate nudges.")
37