Waseem771 committed on
Commit
9828300
·
verified ·
1 Parent(s): 7e49f39

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -12
app.py CHANGED
@@ -1,7 +1,7 @@
1
- from langchain_openai import ChatOpenAI
2
- from langchain_core.prompts import ChatPromptTemplate
3
- from langchain_core.output_parsers import StrOutputParser
4
- from langchain_community.llms import Ollama
5
  import streamlit as st
6
  import os
7
  from dotenv import load_dotenv
@@ -13,29 +13,30 @@ load_dotenv()
13
  os.environ["LANGCHAIN_TRACING_V2"] = "true"
14
  os.environ["LANGCHAIN_API_KEY"] = os.getenv("LANGCHAIN_API_KEY")
15
 
 
 
 
16
  # Prompt Template
17
  prompt = ChatPromptTemplate.from_messages(
18
  [
19
- ("system", "You are a helpful assistant. Please respond to the user queries"),
20
  ("user", "Question: {question}")
21
  ]
22
  )
23
 
 
 
 
24
  # Streamlit app setup
25
- st.title('Langchain Demo With LLAMA2 API')
26
 
27
  # User input
28
  input_text = st.text_input("Search the topic you want")
29
 
30
- # Ollama LLM (ensure the model is available, or access it through Hugging Face API)
31
- llm = Ollama(model="llama2")
32
- output_parser = StrOutputParser()
33
- chain = prompt | llm | output_parser
34
-
35
  # Display result when user inputs text
36
  if input_text:
37
  try:
38
- response = chain.invoke({"question": input_text})
39
  st.write(response)
40
  except Exception as e:
41
  st.error(f"Error: {e}")
 
1
# app.py — Streamlit front-end for a minimal LangChain Q&A demo.
#
# Flow: load secrets from .env -> (optionally) enable LangSmith tracing ->
# build an LLMChain (ChatOpenAI + prompt) -> answer the user's question.

from langchain.chains import LLMChain  # fixed: `from langchain import LLMChain` was removed in langchain >= 0.1
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.schema.output_parser import StrOutputParser  # fixed path: `langchain.output_parsers` has no StrOutputParser
import streamlit as st
import os
from dotenv import load_dotenv

# Pull OPENAI_API_KEY / LANGCHAIN_API_KEY from a local .env file, if present.
load_dotenv()

# Enable LangSmith tracing only when a key is actually available:
# assigning None into os.environ raises TypeError, which previously
# crashed the app whenever LANGCHAIN_API_KEY was unset.
_langchain_key = os.getenv("LANGCHAIN_API_KEY")
if _langchain_key:
    os.environ["LANGCHAIN_TRACING_V2"] = "true"
    os.environ["LANGCHAIN_API_KEY"] = _langchain_key

# Initialize the LLM (OpenAI chat model; key comes from the environment).
llm = ChatOpenAI(model_name="gpt-3.5-turbo", openai_api_key=os.getenv("OPENAI_API_KEY"))

# Prompt Template: fixed system instruction plus the user's question.
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful assistant. Please respond to the user queries."),
        ("user", "Question: {question}"),
    ]
)

# Create LLM Chain wiring the prompt into the model.
chain = LLMChain(llm=llm, prompt=prompt, output_key="response")

# Streamlit app setup. Title corrected: this build calls the OpenAI API,
# not Hugging Face, so the previous title was misleading.
st.title('Langchain Demo with OpenAI API')

# User input
input_text = st.text_input("Search the topic you want")

# Display result when user inputs text
if input_text:
    try:
        response = chain.run({"question": input_text})
        st.write(response)
    except Exception as e:
        # Surface API/config errors in the UI instead of crashing the app.
        st.error(f"Error: {e}")