Waseem771 committed
Commit 732917f · verified · 1 Parent(s): 9828300

Update app.py

Files changed (1):
  app.py  +17 -13
app.py CHANGED
@@ -1,39 +1,43 @@
+import streamlit as st
 from langchain import LLMChain
 from langchain.chat_models import ChatOpenAI
 from langchain.prompts import ChatPromptTemplate
-from langchain.output_parsers import StrOutputParser
-import streamlit as st
-import os
 from dotenv import load_dotenv
+import os
 
 # Load environment variables
 load_dotenv()
 
-# Set environment variables
+# Set LangChain tracing (optional)
 os.environ["LANGCHAIN_TRACING_V2"] = "true"
 os.environ["LANGCHAIN_API_KEY"] = os.getenv("LANGCHAIN_API_KEY")
 
-# Initialize the LLM (e.g., using OpenAI or Hugging Face)
-llm = ChatOpenAI(model_name="gpt-3.5-turbo", openai_api_key=os.getenv("OPENAI_API_KEY"))
+# Initialize the Hugging Face LLaMA 2 model via LangChain
+llm = ChatOpenAI(
+    model_name="meta-llama/Llama-2-7b-chat-hf",
+    temperature=0.7,
+    max_tokens=512,
+    openai_api_key=os.getenv("OPENAI_API_KEY")  # If using OpenAI; otherwise, remove
+)
 
-# Prompt Template
+# Define the prompt template
 prompt = ChatPromptTemplate.from_messages(
     [
-        ("system", "You are a helpful assistant. Please respond to the user queries."),
+        ("system", "You are a helpful assistant."),
         ("user", "Question: {question}")
     ]
 )
 
-# Create LLM Chain
+# Create the LLM Chain
 chain = LLMChain(llm=llm, prompt=prompt, output_key="response")
 
-# Streamlit app setup
-st.title('Langchain Demo with Hugging Face API')
+# Streamlit App Interface
+st.title('LangChain Demo with LLaMA 2 on Hugging Face')
 
 # User input
-input_text = st.text_input("Search the topic you want")
+input_text = st.text_input("Enter your question:")
 
-# Display result when user inputs text
+# Display the response
 if input_text:
     try:
         response = chain.run({"question": input_text})
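
Note: ChatOpenAI in this chain still calls the OpenAI chat-completions API, so passing the meta-llama/Llama-2-7b-chat-hf repo id as model_name will not by itself route requests to Hugging Face; the inline comment in the diff acknowledges this. Below is a minimal sketch, assuming the legacy langchain API already used in app.py and a HUGGINGFACEHUB_API_TOKEN environment variable, of one way to back the same chain with the Hugging Face Hub inference client instead. The names and parameters are illustrative and not part of this commit.

# Sketch (assumption, not part of this commit): keep the existing prompt and
# chain, but build the llm from the Hugging Face Hub client instead of ChatOpenAI.
import os
from langchain import LLMChain
from langchain.llms import HuggingFaceHub

llm = HuggingFaceHub(
    repo_id="meta-llama/Llama-2-7b-chat-hf",                    # gated repo; requires approved access
    model_kwargs={"temperature": 0.7, "max_new_tokens": 512},   # mirrors the settings in the diff
    huggingfacehub_api_token=os.getenv("HUGGINGFACEHUB_API_TOKEN"),
)

chain = LLMChain(llm=llm, prompt=prompt, output_key="response")  # `prompt` as defined in app.py

With this variant, the Streamlit input handling at the bottom of app.py can stay exactly as committed; only the llm construction changes.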